1 //===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/ 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "flang/Optimizer/CodeGen/CodeGen.h" 14 #include "CGOps.h" 15 #include "PassDetail.h" 16 #include "flang/ISO_Fortran_binding.h" 17 #include "flang/Optimizer/Dialect/FIRAttr.h" 18 #include "flang/Optimizer/Dialect/FIROps.h" 19 #include "flang/Optimizer/Support/InternalNames.h" 20 #include "flang/Optimizer/Support/TypeCode.h" 21 #include "flang/Semantics/runtime-type-info.h" 22 #include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h" 23 #include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" 24 #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" 25 #include "mlir/Conversion/LLVMCommon/Pattern.h" 26 #include "mlir/Conversion/OpenMPToLLVM/ConvertOpenMPToLLVM.h" 27 #include "mlir/IR/BuiltinTypes.h" 28 #include "mlir/IR/Matchers.h" 29 #include "mlir/Pass/Pass.h" 30 #include "mlir/Target/LLVMIR/ModuleTranslation.h" 31 #include "llvm/ADT/ArrayRef.h" 32 33 #define DEBUG_TYPE "flang-codegen" 34 35 // fir::LLVMTypeConverter for converting to LLVM IR dialect types. 36 #include "TypeConverter.h" 37 38 // TODO: This should really be recovered from the specified target. 39 static constexpr unsigned defaultAlign = 8; 40 41 /// `fir.box` attribute values as defined for CFI_attribute_t in 42 /// flang/ISO_Fortran_binding.h. 43 static constexpr unsigned kAttrPointer = CFI_attribute_pointer; 44 static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable; 45 46 static inline mlir::Type getVoidPtrType(mlir::MLIRContext *context) { 47 return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8)); 48 } 49 50 static mlir::LLVM::ConstantOp 51 genConstantIndex(mlir::Location loc, mlir::Type ity, 52 mlir::ConversionPatternRewriter &rewriter, 53 std::int64_t offset) { 54 auto cattr = rewriter.getI64IntegerAttr(offset); 55 return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr); 56 } 57 58 static mlir::Block *createBlock(mlir::ConversionPatternRewriter &rewriter, 59 mlir::Block *insertBefore) { 60 assert(insertBefore && "expected valid insertion block"); 61 return rewriter.createBlock(insertBefore->getParent(), 62 mlir::Region::iterator(insertBefore)); 63 } 64 65 namespace { 66 /// FIR conversion pattern template 67 template <typename FromOp> 68 class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> { 69 public: 70 explicit FIROpConversion(fir::LLVMTypeConverter &lowering, 71 const fir::FIRToLLVMPassOptions &options) 72 : mlir::ConvertOpToLLVMPattern<FromOp>(lowering), options(options) {} 73 74 protected: 75 mlir::Type convertType(mlir::Type ty) const { 76 return lowerTy().convertType(ty); 77 } 78 mlir::Type voidPtrTy() const { return getVoidPtrType(); } 79 80 mlir::Type getVoidPtrType() const { 81 return mlir::LLVM::LLVMPointerType::get( 82 mlir::IntegerType::get(&lowerTy().getContext(), 8)); 83 } 84 85 mlir::LLVM::ConstantOp 86 genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter, 87 int value) const { 88 mlir::Type i32Ty = rewriter.getI32Type(); 89 mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value); 90 return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr); 91 } 92 93 mlir::LLVM::ConstantOp 94 genConstantOffset(mlir::Location loc, 95 mlir::ConversionPatternRewriter &rewriter, 96 int offset) const { 97 mlir::Type ity = lowerTy().offsetType(); 98 mlir::IntegerAttr cattr = rewriter.getI32IntegerAttr(offset); 99 return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr); 100 } 101 102 /// Perform an extension or truncation as needed on an integer value. Lowering 103 /// to the specific target may involve some sign-extending or truncation of 104 /// values, particularly to fit them from abstract box types to the 105 /// appropriate reified structures. 106 mlir::Value integerCast(mlir::Location loc, 107 mlir::ConversionPatternRewriter &rewriter, 108 mlir::Type ty, mlir::Value val) const { 109 auto valTy = val.getType(); 110 // If the value was not yet lowered, lower its type so that it can 111 // be used in getPrimitiveTypeSizeInBits. 112 if (!valTy.isa<mlir::IntegerType>()) 113 valTy = convertType(valTy); 114 auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty); 115 auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy); 116 if (toSize < fromSize) 117 return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val); 118 if (toSize > fromSize) 119 return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val); 120 return val; 121 } 122 123 /// Construct code sequence to extract the specifc value from a `fir.box`. 124 mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box, 125 mlir::Type resultTy, 126 mlir::ConversionPatternRewriter &rewriter, 127 unsigned boxValue) const { 128 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 129 mlir::LLVM::ConstantOp cValuePos = 130 genConstantOffset(loc, rewriter, boxValue); 131 auto pty = mlir::LLVM::LLVMPointerType::get(resultTy); 132 auto p = rewriter.create<mlir::LLVM::GEPOp>( 133 loc, pty, box, mlir::ValueRange{c0, cValuePos}); 134 return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p); 135 } 136 137 /// Method to construct code sequence to get the triple for dimension `dim` 138 /// from a box. 139 llvm::SmallVector<mlir::Value, 3> 140 getDimsFromBox(mlir::Location loc, llvm::ArrayRef<mlir::Type> retTys, 141 mlir::Value box, mlir::Value dim, 142 mlir::ConversionPatternRewriter &rewriter) const { 143 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 144 mlir::LLVM::ConstantOp cDims = 145 genConstantOffset(loc, rewriter, kDimsPosInBox); 146 mlir::LLVM::LoadOp l0 = 147 loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter); 148 mlir::LLVM::LoadOp l1 = 149 loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter); 150 mlir::LLVM::LoadOp l2 = 151 loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter); 152 return {l0.getResult(), l1.getResult(), l2.getResult()}; 153 } 154 155 mlir::LLVM::LoadOp 156 loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0, 157 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off, 158 mlir::Type ty, 159 mlir::ConversionPatternRewriter &rewriter) const { 160 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 161 mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off); 162 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c); 163 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 164 } 165 166 mlir::Value 167 loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim, 168 mlir::ConversionPatternRewriter &rewriter) const { 169 auto idxTy = lowerTy().indexType(); 170 auto c0 = genConstantOffset(loc, rewriter, 0); 171 auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox); 172 auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim); 173 return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy, 174 rewriter); 175 } 176 177 /// Read base address from a fir.box. Returned address has type ty. 178 mlir::Value 179 loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box, 180 mlir::ConversionPatternRewriter &rewriter) const { 181 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 182 mlir::LLVM::ConstantOp cAddr = 183 genConstantOffset(loc, rewriter, kAddrPosInBox); 184 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 185 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr); 186 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 187 } 188 189 mlir::Value 190 loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box, 191 mlir::ConversionPatternRewriter &rewriter) const { 192 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 193 mlir::LLVM::ConstantOp cElemLen = 194 genConstantOffset(loc, rewriter, kElemLenPosInBox); 195 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 196 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen); 197 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 198 } 199 200 // Get the element type given an LLVM type that is of the form 201 // [llvm.ptr](array|struct|vector)+ and the provided indexes. 202 static mlir::Type getBoxEleTy(mlir::Type type, 203 llvm::ArrayRef<unsigned> indexes) { 204 if (auto t = type.dyn_cast<mlir::LLVM::LLVMPointerType>()) 205 type = t.getElementType(); 206 for (auto i : indexes) { 207 if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) { 208 assert(!t.isOpaque() && i < t.getBody().size()); 209 type = t.getBody()[i]; 210 } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 211 type = t.getElementType(); 212 } else if (auto t = type.dyn_cast<mlir::VectorType>()) { 213 type = t.getElementType(); 214 } else { 215 fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()), 216 "request for invalid box element type"); 217 } 218 } 219 return type; 220 } 221 222 // Return LLVM type of the base address given the LLVM type 223 // of the related descriptor (lowered fir.box type). 224 static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) { 225 return getBoxEleTy(type, {kAddrPosInBox}); 226 } 227 228 // Load the attribute from the \p box and perform a check against \p maskValue 229 // The final comparison is implemented as `(attribute & maskValue) != 0`. 230 mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box, 231 mlir::ConversionPatternRewriter &rewriter, 232 unsigned maskValue) const { 233 mlir::Type attrTy = rewriter.getI32Type(); 234 mlir::Value attribute = 235 getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox); 236 mlir::LLVM::ConstantOp attrMask = 237 genConstantOffset(loc, rewriter, maskValue); 238 auto maskRes = 239 rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask); 240 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 241 return rewriter.create<mlir::LLVM::ICmpOp>( 242 loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0); 243 } 244 245 template <typename... ARGS> 246 mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty, 247 mlir::ConversionPatternRewriter &rewriter, 248 mlir::Value base, ARGS... args) const { 249 llvm::SmallVector<mlir::Value> cv{args...}; 250 return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv); 251 } 252 253 fir::LLVMTypeConverter &lowerTy() const { 254 return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter()); 255 } 256 257 const fir::FIRToLLVMPassOptions &options; 258 }; 259 260 /// FIR conversion pattern template 261 template <typename FromOp> 262 class FIROpAndTypeConversion : public FIROpConversion<FromOp> { 263 public: 264 using FIROpConversion<FromOp>::FIROpConversion; 265 using OpAdaptor = typename FromOp::Adaptor; 266 267 mlir::LogicalResult 268 matchAndRewrite(FromOp op, OpAdaptor adaptor, 269 mlir::ConversionPatternRewriter &rewriter) const final { 270 mlir::Type ty = this->convertType(op.getType()); 271 return doRewrite(op, ty, adaptor, rewriter); 272 } 273 274 virtual mlir::LogicalResult 275 doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor, 276 mlir::ConversionPatternRewriter &rewriter) const = 0; 277 }; 278 279 // Lower `fir.address_of` operation to `llvm.address_of` operation. 280 struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> { 281 using FIROpConversion::FIROpConversion; 282 283 mlir::LogicalResult 284 matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor, 285 mlir::ConversionPatternRewriter &rewriter) const override { 286 auto ty = convertType(addr.getType()); 287 rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>( 288 addr, ty, addr.getSymbol().getRootReference().getValue()); 289 return mlir::success(); 290 } 291 }; 292 } // namespace 293 294 /// Lookup the function to compute the memory size of this parametric derived 295 /// type. The size of the object may depend on the LEN type parameters of the 296 /// derived type. 297 static mlir::LLVM::LLVMFuncOp 298 getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op, 299 mlir::ConversionPatternRewriter &rewriter) { 300 auto module = op->getParentOfType<mlir::ModuleOp>(); 301 std::string name = recTy.getName().str() + "P.mem.size"; 302 return module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name); 303 } 304 305 // Compute the alloc scale size (constant factors encoded in the array type). 306 // We do this for arrays without a constant interior or arrays of character with 307 // dynamic length arrays, since those are the only ones that get decayed to a 308 // pointer to the element type. 309 template <typename OP> 310 static mlir::Value 311 genAllocationScaleSize(OP op, mlir::Type ity, 312 mlir::ConversionPatternRewriter &rewriter) { 313 mlir::Location loc = op.getLoc(); 314 mlir::Type dataTy = op.getInType(); 315 mlir::Type scalarType = fir::unwrapSequenceType(dataTy); 316 auto seqTy = dataTy.dyn_cast<fir::SequenceType>(); 317 if ((op.hasShapeOperands() && seqTy && !seqTy.hasConstantInterior()) || 318 (seqTy && fir::characterWithDynamicLen(scalarType))) { 319 fir::SequenceType::Extent constSize = 1; 320 for (auto extent : seqTy.getShape()) 321 if (extent != fir::SequenceType::getUnknownExtent()) 322 constSize *= extent; 323 if (constSize != 1) { 324 mlir::Value constVal{ 325 genConstantIndex(loc, ity, rewriter, constSize).getResult()}; 326 return constVal; 327 } 328 } 329 return nullptr; 330 } 331 332 namespace { 333 /// convert to LLVM IR dialect `alloca` 334 struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> { 335 using FIROpConversion::FIROpConversion; 336 337 mlir::LogicalResult 338 matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor, 339 mlir::ConversionPatternRewriter &rewriter) const override { 340 mlir::ValueRange operands = adaptor.getOperands(); 341 auto loc = alloc.getLoc(); 342 mlir::Type ity = lowerTy().indexType(); 343 unsigned i = 0; 344 mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult(); 345 mlir::Type ty = convertType(alloc.getType()); 346 mlir::Type resultTy = ty; 347 if (alloc.hasLenParams()) { 348 unsigned end = alloc.numLenParams(); 349 llvm::SmallVector<mlir::Value> lenParams; 350 for (; i < end; ++i) 351 lenParams.push_back(operands[i]); 352 mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType()); 353 if (auto chrTy = scalarType.dyn_cast<fir::CharacterType>()) { 354 fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen( 355 chrTy.getContext(), chrTy.getFKind()); 356 ty = mlir::LLVM::LLVMPointerType::get(convertType(rawCharTy)); 357 assert(end == 1); 358 size = integerCast(loc, rewriter, ity, lenParams[0]); 359 } else if (auto recTy = scalarType.dyn_cast<fir::RecordType>()) { 360 mlir::LLVM::LLVMFuncOp memSizeFn = 361 getDependentTypeMemSizeFn(recTy, alloc, rewriter); 362 if (!memSizeFn) 363 emitError(loc, "did not find allocation function"); 364 mlir::NamedAttribute attr = rewriter.getNamedAttr( 365 "callee", mlir::SymbolRefAttr::get(memSizeFn)); 366 auto call = rewriter.create<mlir::LLVM::CallOp>( 367 loc, ity, lenParams, llvm::ArrayRef<mlir::NamedAttribute>{attr}); 368 size = call.getResult(0); 369 ty = mlir::LLVM::LLVMPointerType::get( 370 mlir::IntegerType::get(alloc.getContext(), 8)); 371 } else { 372 return emitError(loc, "unexpected type ") 373 << scalarType << " with type parameters"; 374 } 375 } 376 if (auto scaleSize = genAllocationScaleSize(alloc, ity, rewriter)) 377 size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize); 378 if (alloc.hasShapeOperands()) { 379 unsigned end = operands.size(); 380 for (; i < end; ++i) 381 size = rewriter.create<mlir::LLVM::MulOp>( 382 loc, ity, size, integerCast(loc, rewriter, ity, operands[i])); 383 } 384 if (ty == resultTy) { 385 // Do not emit the bitcast if ty and resultTy are the same. 386 rewriter.replaceOpWithNewOp<mlir::LLVM::AllocaOp>(alloc, ty, size, 387 alloc->getAttrs()); 388 } else { 389 auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, ty, size, 390 alloc->getAttrs()); 391 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(alloc, resultTy, al); 392 } 393 return mlir::success(); 394 } 395 }; 396 } // namespace 397 398 /// Construct an `llvm.extractvalue` instruction. It will return value at 399 /// element \p x from \p tuple. 400 static mlir::LLVM::ExtractValueOp 401 genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty, 402 mlir::ConversionPatternRewriter &rewriter, 403 mlir::MLIRContext *ctx, int x) { 404 auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x)); 405 auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x]; 406 return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx); 407 } 408 409 namespace { 410 /// Lower `fir.box_addr` to the sequence of operations to extract the first 411 /// element of the box. 412 struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> { 413 using FIROpConversion::FIROpConversion; 414 415 mlir::LogicalResult 416 matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor, 417 mlir::ConversionPatternRewriter &rewriter) const override { 418 mlir::Value a = adaptor.getOperands()[0]; 419 auto loc = boxaddr.getLoc(); 420 mlir::Type ty = convertType(boxaddr.getType()); 421 if (auto argty = boxaddr.getVal().getType().dyn_cast<fir::BoxType>()) { 422 rewriter.replaceOp(boxaddr, loadBaseAddrFromBox(loc, ty, a, rewriter)); 423 } else { 424 auto c0attr = rewriter.getI32IntegerAttr(0); 425 auto c0 = mlir::ArrayAttr::get(boxaddr.getContext(), c0attr); 426 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, ty, a, 427 c0); 428 } 429 return mlir::success(); 430 } 431 }; 432 433 /// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the 434 /// boxchar. 435 struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> { 436 using FIROpConversion::FIROpConversion; 437 438 mlir::LogicalResult 439 matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor, 440 mlir::ConversionPatternRewriter &rewriter) const override { 441 mlir::Value boxChar = adaptor.getOperands()[0]; 442 mlir::Location loc = boxChar.getLoc(); 443 mlir::MLIRContext *ctx = boxChar.getContext(); 444 mlir::Type returnValTy = boxCharLen.getResult().getType(); 445 446 constexpr int boxcharLenIdx = 1; 447 mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex( 448 loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx); 449 mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len); 450 rewriter.replaceOp(boxCharLen, lenAfterCast); 451 452 return mlir::success(); 453 } 454 }; 455 456 /// Lower `fir.box_dims` to a sequence of operations to extract the requested 457 /// dimension infomartion from the boxed value. 458 /// Result in a triple set of GEPs and loads. 459 struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> { 460 using FIROpConversion::FIROpConversion; 461 462 mlir::LogicalResult 463 matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor, 464 mlir::ConversionPatternRewriter &rewriter) const override { 465 llvm::SmallVector<mlir::Type, 3> resultTypes = { 466 convertType(boxdims.getResult(0).getType()), 467 convertType(boxdims.getResult(1).getType()), 468 convertType(boxdims.getResult(2).getType()), 469 }; 470 auto results = 471 getDimsFromBox(boxdims.getLoc(), resultTypes, adaptor.getOperands()[0], 472 adaptor.getOperands()[1], rewriter); 473 rewriter.replaceOp(boxdims, results); 474 return mlir::success(); 475 } 476 }; 477 478 /// Lower `fir.box_elesize` to a sequence of operations ro extract the size of 479 /// an element in the boxed value. 480 struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> { 481 using FIROpConversion::FIROpConversion; 482 483 mlir::LogicalResult 484 matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor, 485 mlir::ConversionPatternRewriter &rewriter) const override { 486 mlir::Value a = adaptor.getOperands()[0]; 487 auto loc = boxelesz.getLoc(); 488 auto ty = convertType(boxelesz.getType()); 489 auto elemSize = getValueFromBox(loc, a, ty, rewriter, kElemLenPosInBox); 490 rewriter.replaceOp(boxelesz, elemSize); 491 return mlir::success(); 492 } 493 }; 494 495 /// Lower `fir.box_isalloc` to a sequence of operations to determine if the 496 /// boxed value was from an ALLOCATABLE entity. 497 struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> { 498 using FIROpConversion::FIROpConversion; 499 500 mlir::LogicalResult 501 matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor, 502 mlir::ConversionPatternRewriter &rewriter) const override { 503 mlir::Value box = adaptor.getOperands()[0]; 504 auto loc = boxisalloc.getLoc(); 505 mlir::Value check = 506 genBoxAttributeCheck(loc, box, rewriter, kAttrAllocatable); 507 rewriter.replaceOp(boxisalloc, check); 508 return mlir::success(); 509 } 510 }; 511 512 /// Lower `fir.box_isarray` to a sequence of operations to determine if the 513 /// boxed is an array. 514 struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> { 515 using FIROpConversion::FIROpConversion; 516 517 mlir::LogicalResult 518 matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor, 519 mlir::ConversionPatternRewriter &rewriter) const override { 520 mlir::Value a = adaptor.getOperands()[0]; 521 auto loc = boxisarray.getLoc(); 522 auto rank = 523 getValueFromBox(loc, a, rewriter.getI32Type(), rewriter, kRankPosInBox); 524 auto c0 = genConstantOffset(loc, rewriter, 0); 525 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 526 boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0); 527 return mlir::success(); 528 } 529 }; 530 531 /// Lower `fir.box_isptr` to a sequence of operations to determined if the 532 /// boxed value was from a POINTER entity. 533 struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> { 534 using FIROpConversion::FIROpConversion; 535 536 mlir::LogicalResult 537 matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor, 538 mlir::ConversionPatternRewriter &rewriter) const override { 539 mlir::Value box = adaptor.getOperands()[0]; 540 auto loc = boxisptr.getLoc(); 541 mlir::Value check = genBoxAttributeCheck(loc, box, rewriter, kAttrPointer); 542 rewriter.replaceOp(boxisptr, check); 543 return mlir::success(); 544 } 545 }; 546 547 /// Lower `fir.box_rank` to the sequence of operation to extract the rank from 548 /// the box. 549 struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> { 550 using FIROpConversion::FIROpConversion; 551 552 mlir::LogicalResult 553 matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor, 554 mlir::ConversionPatternRewriter &rewriter) const override { 555 mlir::Value a = adaptor.getOperands()[0]; 556 auto loc = boxrank.getLoc(); 557 mlir::Type ty = convertType(boxrank.getType()); 558 auto result = getValueFromBox(loc, a, ty, rewriter, kRankPosInBox); 559 rewriter.replaceOp(boxrank, result); 560 return mlir::success(); 561 } 562 }; 563 564 /// Lower `fir.boxproc_host` operation. Extracts the host pointer from the 565 /// boxproc. 566 /// TODO: Part of supporting Fortran 2003 procedure pointers. 567 struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> { 568 using FIROpConversion::FIROpConversion; 569 570 mlir::LogicalResult 571 matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor, 572 mlir::ConversionPatternRewriter &rewriter) const override { 573 TODO(boxprochost.getLoc(), "fir.boxproc_host codegen"); 574 return mlir::failure(); 575 } 576 }; 577 578 /// Lower `fir.box_tdesc` to the sequence of operations to extract the type 579 /// descriptor from the box. 580 struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> { 581 using FIROpConversion::FIROpConversion; 582 583 mlir::LogicalResult 584 matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor, 585 mlir::ConversionPatternRewriter &rewriter) const override { 586 mlir::Value box = adaptor.getOperands()[0]; 587 auto loc = boxtypedesc.getLoc(); 588 mlir::Type typeTy = 589 fir::getDescFieldTypeModel<kTypePosInBox>()(boxtypedesc.getContext()); 590 auto result = getValueFromBox(loc, box, typeTy, rewriter, kTypePosInBox); 591 auto typePtrTy = mlir::LLVM::LLVMPointerType::get(typeTy); 592 rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(boxtypedesc, typePtrTy, 593 result); 594 return mlir::success(); 595 } 596 }; 597 598 /// Lower `fir.string_lit` to LLVM IR dialect operation. 599 struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> { 600 using FIROpConversion::FIROpConversion; 601 602 mlir::LogicalResult 603 matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor, 604 mlir::ConversionPatternRewriter &rewriter) const override { 605 auto ty = convertType(constop.getType()); 606 auto attr = constop.getValue(); 607 if (attr.isa<mlir::StringAttr>()) { 608 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr); 609 return mlir::success(); 610 } 611 612 auto charTy = constop.getType().cast<fir::CharacterType>(); 613 unsigned bits = lowerTy().characterBitsize(charTy); 614 mlir::Type intTy = rewriter.getIntegerType(bits); 615 mlir::Location loc = constop.getLoc(); 616 mlir::Value cst = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 617 if (auto arr = attr.dyn_cast<mlir::DenseElementsAttr>()) { 618 cst = rewriter.create<mlir::LLVM::ConstantOp>(loc, ty, arr); 619 } else if (auto arr = attr.dyn_cast<mlir::ArrayAttr>()) { 620 for (auto a : llvm::enumerate(arr.getValue())) { 621 // convert each character to a precise bitsize 622 auto elemAttr = mlir::IntegerAttr::get( 623 intTy, 624 a.value().cast<mlir::IntegerAttr>().getValue().zextOrTrunc(bits)); 625 auto elemCst = 626 rewriter.create<mlir::LLVM::ConstantOp>(loc, intTy, elemAttr); 627 auto index = mlir::ArrayAttr::get( 628 constop.getContext(), rewriter.getI32IntegerAttr(a.index())); 629 cst = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, cst, elemCst, 630 index); 631 } 632 } else { 633 return mlir::failure(); 634 } 635 rewriter.replaceOp(constop, cst); 636 return mlir::success(); 637 } 638 }; 639 640 // `fir.call` -> `llvm.call` 641 struct CallOpConversion : public FIROpConversion<fir::CallOp> { 642 using FIROpConversion::FIROpConversion; 643 644 mlir::LogicalResult 645 matchAndRewrite(fir::CallOp call, OpAdaptor adaptor, 646 mlir::ConversionPatternRewriter &rewriter) const override { 647 llvm::SmallVector<mlir::Type> resultTys; 648 for (auto r : call.getResults()) 649 resultTys.push_back(convertType(r.getType())); 650 rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>( 651 call, resultTys, adaptor.getOperands(), call->getAttrs()); 652 return mlir::success(); 653 } 654 }; 655 } // namespace 656 657 static mlir::Type getComplexEleTy(mlir::Type complex) { 658 if (auto cc = complex.dyn_cast<mlir::ComplexType>()) 659 return cc.getElementType(); 660 return complex.cast<fir::ComplexType>().getElementType(); 661 } 662 663 namespace { 664 /// Compare complex values 665 /// 666 /// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une). 667 /// 668 /// For completeness, all other comparison are done on the real component only. 669 struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> { 670 using FIROpConversion::FIROpConversion; 671 672 mlir::LogicalResult 673 matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor, 674 mlir::ConversionPatternRewriter &rewriter) const override { 675 mlir::ValueRange operands = adaptor.getOperands(); 676 mlir::MLIRContext *ctxt = cmp.getContext(); 677 mlir::Type eleTy = convertType(getComplexEleTy(cmp.getLhs().getType())); 678 mlir::Type resTy = convertType(cmp.getType()); 679 mlir::Location loc = cmp.getLoc(); 680 auto pos0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0)); 681 llvm::SmallVector<mlir::Value, 2> rp{ 682 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0], 683 pos0), 684 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1], 685 pos0)}; 686 auto rcp = 687 rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, rp, cmp->getAttrs()); 688 auto pos1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1)); 689 llvm::SmallVector<mlir::Value, 2> ip{ 690 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0], 691 pos1), 692 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1], 693 pos1)}; 694 auto icp = 695 rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, ip, cmp->getAttrs()); 696 llvm::SmallVector<mlir::Value, 2> cp{rcp, icp}; 697 switch (cmp.getPredicate()) { 698 case mlir::arith::CmpFPredicate::OEQ: // .EQ. 699 rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp); 700 break; 701 case mlir::arith::CmpFPredicate::UNE: // .NE. 702 rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp); 703 break; 704 default: 705 rewriter.replaceOp(cmp, rcp.getResult()); 706 break; 707 } 708 return mlir::success(); 709 } 710 }; 711 712 /// Lower complex constants 713 struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> { 714 using FIROpConversion::FIROpConversion; 715 716 mlir::LogicalResult 717 matchAndRewrite(fir::ConstcOp conc, OpAdaptor, 718 mlir::ConversionPatternRewriter &rewriter) const override { 719 mlir::Location loc = conc.getLoc(); 720 mlir::MLIRContext *ctx = conc.getContext(); 721 mlir::Type ty = convertType(conc.getType()); 722 mlir::Type ety = convertType(getComplexEleTy(conc.getType())); 723 auto realFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getReal())); 724 auto realPart = 725 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, realFloatAttr); 726 auto imFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getImaginary())); 727 auto imPart = 728 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, imFloatAttr); 729 auto realIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 730 auto imIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 731 auto undef = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 732 auto setReal = rewriter.create<mlir::LLVM::InsertValueOp>( 733 loc, ty, undef, realPart, realIndex); 734 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(conc, ty, setReal, 735 imPart, imIndex); 736 return mlir::success(); 737 } 738 739 inline llvm::APFloat getValue(mlir::Attribute attr) const { 740 return attr.cast<fir::RealAttr>().getValue(); 741 } 742 }; 743 744 /// convert value of from-type to value of to-type 745 struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> { 746 using FIROpConversion::FIROpConversion; 747 748 static bool isFloatingPointTy(mlir::Type ty) { 749 return ty.isa<mlir::FloatType>(); 750 } 751 752 mlir::LogicalResult 753 matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor, 754 mlir::ConversionPatternRewriter &rewriter) const override { 755 auto fromFirTy = convert.getValue().getType(); 756 auto toFirTy = convert.getRes().getType(); 757 auto fromTy = convertType(fromFirTy); 758 auto toTy = convertType(toFirTy); 759 mlir::Value op0 = adaptor.getOperands()[0]; 760 if (fromTy == toTy) { 761 rewriter.replaceOp(convert, op0); 762 return mlir::success(); 763 } 764 auto loc = convert.getLoc(); 765 auto convertFpToFp = [&](mlir::Value val, unsigned fromBits, 766 unsigned toBits, mlir::Type toTy) -> mlir::Value { 767 if (fromBits == toBits) { 768 // TODO: Converting between two floating-point representations with the 769 // same bitwidth is not allowed for now. 770 mlir::emitError(loc, 771 "cannot implicitly convert between two floating-point " 772 "representations of the same bitwidth"); 773 return {}; 774 } 775 if (fromBits > toBits) 776 return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val); 777 return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val); 778 }; 779 // Complex to complex conversion. 780 if (fir::isa_complex(fromFirTy) && fir::isa_complex(toFirTy)) { 781 // Special case: handle the conversion of a complex such that both the 782 // real and imaginary parts are converted together. 783 auto zero = mlir::ArrayAttr::get(convert.getContext(), 784 rewriter.getI32IntegerAttr(0)); 785 auto one = mlir::ArrayAttr::get(convert.getContext(), 786 rewriter.getI32IntegerAttr(1)); 787 auto ty = convertType(getComplexEleTy(convert.getValue().getType())); 788 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, zero); 789 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, one); 790 auto nt = convertType(getComplexEleTy(convert.getRes().getType())); 791 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty); 792 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt); 793 auto rc = convertFpToFp(rp, fromBits, toBits, nt); 794 auto ic = convertFpToFp(ip, fromBits, toBits, nt); 795 auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy); 796 auto i1 = 797 rewriter.create<mlir::LLVM::InsertValueOp>(loc, toTy, un, rc, zero); 798 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, toTy, i1, 799 ic, one); 800 return mlir::success(); 801 } 802 803 // Follow UNIX F77 convention for logicals: 804 // 1. underlying integer is not zero => logical is .TRUE. 805 // 2. logical is .TRUE. => set underlying integer to 1. 806 auto i1Type = mlir::IntegerType::get(convert.getContext(), 1); 807 if (fromFirTy.isa<fir::LogicalType>() && toFirTy == i1Type) { 808 mlir::Value zero = genConstantIndex(loc, fromTy, rewriter, 0); 809 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 810 convert, mlir::LLVM::ICmpPredicate::ne, op0, zero); 811 return mlir::success(); 812 } 813 if (fromFirTy == i1Type && toFirTy.isa<fir::LogicalType>()) { 814 rewriter.replaceOpWithNewOp<mlir::LLVM::ZExtOp>(convert, toTy, op0); 815 return mlir::success(); 816 } 817 818 // Floating point to floating point conversion. 819 if (isFloatingPointTy(fromTy)) { 820 if (isFloatingPointTy(toTy)) { 821 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy); 822 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy); 823 auto v = convertFpToFp(op0, fromBits, toBits, toTy); 824 rewriter.replaceOp(convert, v); 825 return mlir::success(); 826 } 827 if (toTy.isa<mlir::IntegerType>()) { 828 rewriter.replaceOpWithNewOp<mlir::LLVM::FPToSIOp>(convert, toTy, op0); 829 return mlir::success(); 830 } 831 } else if (fromTy.isa<mlir::IntegerType>()) { 832 // Integer to integer conversion. 833 if (toTy.isa<mlir::IntegerType>()) { 834 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy); 835 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy); 836 assert(fromBits != toBits); 837 if (fromBits > toBits) { 838 rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0); 839 return mlir::success(); 840 } 841 rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0); 842 return mlir::success(); 843 } 844 // Integer to floating point conversion. 845 if (isFloatingPointTy(toTy)) { 846 rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0); 847 return mlir::success(); 848 } 849 // Integer to pointer conversion. 850 if (toTy.isa<mlir::LLVM::LLVMPointerType>()) { 851 rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0); 852 return mlir::success(); 853 } 854 } else if (fromTy.isa<mlir::LLVM::LLVMPointerType>()) { 855 // Pointer to integer conversion. 856 if (toTy.isa<mlir::IntegerType>()) { 857 rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0); 858 return mlir::success(); 859 } 860 // Pointer to pointer conversion. 861 if (toTy.isa<mlir::LLVM::LLVMPointerType>()) { 862 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0); 863 return mlir::success(); 864 } 865 } 866 return emitError(loc) << "cannot convert " << fromTy << " to " << toTy; 867 } 868 }; 869 870 /// Lower `fir.dispatch` operation. A virtual call to a method in a dispatch 871 /// table. 872 struct DispatchOpConversion : public FIROpConversion<fir::DispatchOp> { 873 using FIROpConversion::FIROpConversion; 874 875 mlir::LogicalResult 876 matchAndRewrite(fir::DispatchOp dispatch, OpAdaptor adaptor, 877 mlir::ConversionPatternRewriter &rewriter) const override { 878 TODO(dispatch.getLoc(), "fir.dispatch codegen"); 879 return mlir::failure(); 880 } 881 }; 882 883 /// Lower `fir.dispatch_table` operation. The dispatch table for a Fortran 884 /// derived type. 885 struct DispatchTableOpConversion 886 : public FIROpConversion<fir::DispatchTableOp> { 887 using FIROpConversion::FIROpConversion; 888 889 mlir::LogicalResult 890 matchAndRewrite(fir::DispatchTableOp dispTab, OpAdaptor adaptor, 891 mlir::ConversionPatternRewriter &rewriter) const override { 892 TODO(dispTab.getLoc(), "fir.dispatch_table codegen"); 893 return mlir::failure(); 894 } 895 }; 896 897 /// Lower `fir.dt_entry` operation. An entry in a dispatch table; binds a 898 /// method-name to a function. 899 struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> { 900 using FIROpConversion::FIROpConversion; 901 902 mlir::LogicalResult 903 matchAndRewrite(fir::DTEntryOp dtEnt, OpAdaptor adaptor, 904 mlir::ConversionPatternRewriter &rewriter) const override { 905 TODO(dtEnt.getLoc(), "fir.dt_entry codegen"); 906 return mlir::failure(); 907 } 908 }; 909 910 /// Lower `fir.global_len` operation. 911 struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> { 912 using FIROpConversion::FIROpConversion; 913 914 mlir::LogicalResult 915 matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor, 916 mlir::ConversionPatternRewriter &rewriter) const override { 917 TODO(globalLen.getLoc(), "fir.global_len codegen"); 918 return mlir::failure(); 919 } 920 }; 921 922 /// Lower fir.len_param_index 923 struct LenParamIndexOpConversion 924 : public FIROpConversion<fir::LenParamIndexOp> { 925 using FIROpConversion::FIROpConversion; 926 927 // FIXME: this should be specialized by the runtime target 928 mlir::LogicalResult 929 matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor, 930 mlir::ConversionPatternRewriter &rewriter) const override { 931 TODO(lenp.getLoc(), "fir.len_param_index codegen"); 932 } 933 }; 934 935 /// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of 936 /// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element 937 /// in this struct is a pointer. Its type is determined from `KIND`. The 2nd 938 /// element is the length of the character buffer (`#n`). 939 struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> { 940 using FIROpConversion::FIROpConversion; 941 942 mlir::LogicalResult 943 matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor, 944 mlir::ConversionPatternRewriter &rewriter) const override { 945 mlir::ValueRange operands = adaptor.getOperands(); 946 auto *ctx = emboxChar.getContext(); 947 948 mlir::Value charBuffer = operands[0]; 949 mlir::Value charBufferLen = operands[1]; 950 951 mlir::Location loc = emboxChar.getLoc(); 952 mlir::Type llvmStructTy = convertType(emboxChar.getType()); 953 auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy); 954 955 mlir::Type lenTy = 956 llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1]; 957 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen); 958 959 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 960 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 961 auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>( 962 loc, llvmStructTy, llvmStruct, charBuffer, c0); 963 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 964 emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1); 965 966 return mlir::success(); 967 } 968 }; 969 } // namespace 970 971 /// Return the LLVMFuncOp corresponding to the standard malloc call. 972 static mlir::LLVM::LLVMFuncOp 973 getMalloc(fir::AllocMemOp op, mlir::ConversionPatternRewriter &rewriter) { 974 auto module = op->getParentOfType<mlir::ModuleOp>(); 975 if (mlir::LLVM::LLVMFuncOp mallocFunc = 976 module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("malloc")) 977 return mallocFunc; 978 mlir::OpBuilder moduleBuilder( 979 op->getParentOfType<mlir::ModuleOp>().getBodyRegion()); 980 auto indexType = mlir::IntegerType::get(op.getContext(), 64); 981 return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>( 982 rewriter.getUnknownLoc(), "malloc", 983 mlir::LLVM::LLVMFunctionType::get(getVoidPtrType(op.getContext()), 984 indexType, 985 /*isVarArg=*/false)); 986 } 987 988 /// Helper function for generating the LLVM IR that computes the size 989 /// in bytes for a derived type. 990 static mlir::Value 991 computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy, 992 mlir::ConversionPatternRewriter &rewriter) { 993 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 994 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 995 llvm::SmallVector<mlir::Value> args{one}; 996 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args); 997 return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep); 998 } 999 1000 namespace { 1001 /// Lower a `fir.allocmem` instruction into `llvm.call @malloc` 1002 struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> { 1003 using FIROpConversion::FIROpConversion; 1004 1005 mlir::LogicalResult 1006 matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor, 1007 mlir::ConversionPatternRewriter &rewriter) const override { 1008 auto heapTy = heap.getType(); 1009 auto ty = convertType(heapTy); 1010 mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter); 1011 mlir::Location loc = heap.getLoc(); 1012 auto ity = lowerTy().indexType(); 1013 auto dataTy = fir::unwrapRefType(heapTy); 1014 if (fir::isRecordWithTypeParameters(fir::unwrapSequenceType(dataTy))) 1015 TODO(loc, "fir.allocmem codegen of derived type with length parameters"); 1016 mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty); 1017 if (auto scaleSize = genAllocationScaleSize(heap, ity, rewriter)) 1018 size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize); 1019 for (mlir::Value opnd : adaptor.getOperands()) 1020 size = rewriter.create<mlir::LLVM::MulOp>( 1021 loc, ity, size, integerCast(loc, rewriter, ity, opnd)); 1022 heap->setAttr("callee", mlir::SymbolRefAttr::get(mallocFunc)); 1023 auto malloc = rewriter.create<mlir::LLVM::CallOp>( 1024 loc, ::getVoidPtrType(heap.getContext()), size, heap->getAttrs()); 1025 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(heap, ty, 1026 malloc.getResult(0)); 1027 return mlir::success(); 1028 } 1029 1030 // Compute the (allocation) size of the allocmem type in bytes. 1031 mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy, 1032 mlir::ConversionPatternRewriter &rewriter, 1033 mlir::Type llTy) const { 1034 // Use the primitive size, if available. 1035 auto ptrTy = llTy.dyn_cast<mlir::LLVM::LLVMPointerType>(); 1036 if (auto size = 1037 mlir::LLVM::getPrimitiveTypeSizeInBits(ptrTy.getElementType())) 1038 return genConstantIndex(loc, idxTy, rewriter, size / 8); 1039 1040 // Otherwise, generate the GEP trick in LLVM IR to compute the size. 1041 return computeDerivedTypeSize(loc, ptrTy, idxTy, rewriter); 1042 } 1043 }; 1044 } // namespace 1045 1046 /// Return the LLVMFuncOp corresponding to the standard free call. 1047 static mlir::LLVM::LLVMFuncOp 1048 getFree(fir::FreeMemOp op, mlir::ConversionPatternRewriter &rewriter) { 1049 auto module = op->getParentOfType<mlir::ModuleOp>(); 1050 if (mlir::LLVM::LLVMFuncOp freeFunc = 1051 module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("free")) 1052 return freeFunc; 1053 mlir::OpBuilder moduleBuilder(module.getBodyRegion()); 1054 auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext()); 1055 return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>( 1056 rewriter.getUnknownLoc(), "free", 1057 mlir::LLVM::LLVMFunctionType::get(voidType, 1058 getVoidPtrType(op.getContext()), 1059 /*isVarArg=*/false)); 1060 } 1061 1062 namespace { 1063 /// Lower a `fir.freemem` instruction into `llvm.call @free` 1064 struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> { 1065 using FIROpConversion::FIROpConversion; 1066 1067 mlir::LogicalResult 1068 matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor, 1069 mlir::ConversionPatternRewriter &rewriter) const override { 1070 mlir::LLVM::LLVMFuncOp freeFunc = getFree(freemem, rewriter); 1071 mlir::Location loc = freemem.getLoc(); 1072 auto bitcast = rewriter.create<mlir::LLVM::BitcastOp>( 1073 freemem.getLoc(), voidPtrTy(), adaptor.getOperands()[0]); 1074 freemem->setAttr("callee", mlir::SymbolRefAttr::get(freeFunc)); 1075 rewriter.create<mlir::LLVM::CallOp>( 1076 loc, mlir::TypeRange{}, mlir::ValueRange{bitcast}, freemem->getAttrs()); 1077 rewriter.eraseOp(freemem); 1078 return mlir::success(); 1079 } 1080 }; 1081 } // namespace 1082 1083 namespace {} // namespace 1084 1085 /// Common base class for embox to descriptor conversion. 1086 template <typename OP> 1087 struct EmboxCommonConversion : public FIROpConversion<OP> { 1088 using FIROpConversion<OP>::FIROpConversion; 1089 1090 // Find the LLVMFuncOp in whose entry block the alloca should be inserted. 1091 // The order to find the LLVMFuncOp is as follows: 1092 // 1. The parent operation of the current block if it is a LLVMFuncOp. 1093 // 2. The first ancestor that is a LLVMFuncOp. 1094 mlir::LLVM::LLVMFuncOp 1095 getFuncForAllocaInsert(mlir::ConversionPatternRewriter &rewriter) const { 1096 mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp(); 1097 return mlir::isa<mlir::LLVM::LLVMFuncOp>(parentOp) 1098 ? mlir::cast<mlir::LLVM::LLVMFuncOp>(parentOp) 1099 : parentOp->getParentOfType<mlir::LLVM::LLVMFuncOp>(); 1100 } 1101 1102 // Generate an alloca of size 1 and type \p toTy. 1103 mlir::LLVM::AllocaOp 1104 genAllocaWithType(mlir::Location loc, mlir::Type toTy, unsigned alignment, 1105 mlir::ConversionPatternRewriter &rewriter) const { 1106 auto thisPt = rewriter.saveInsertionPoint(); 1107 mlir::LLVM::LLVMFuncOp func = getFuncForAllocaInsert(rewriter); 1108 rewriter.setInsertionPointToStart(&func.front()); 1109 auto size = this->genI32Constant(loc, rewriter, 1); 1110 auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, toTy, size, alignment); 1111 rewriter.restoreInsertionPoint(thisPt); 1112 return al; 1113 } 1114 1115 static int getCFIAttr(fir::BoxType boxTy) { 1116 auto eleTy = boxTy.getEleTy(); 1117 if (eleTy.isa<fir::PointerType>()) 1118 return CFI_attribute_pointer; 1119 if (eleTy.isa<fir::HeapType>()) 1120 return CFI_attribute_allocatable; 1121 return CFI_attribute_other; 1122 } 1123 1124 static fir::RecordType unwrapIfDerived(fir::BoxType boxTy) { 1125 return fir::unwrapSequenceType(fir::dyn_cast_ptrOrBoxEleTy(boxTy)) 1126 .template dyn_cast<fir::RecordType>(); 1127 } 1128 static bool isDerivedTypeWithLenParams(fir::BoxType boxTy) { 1129 auto recTy = unwrapIfDerived(boxTy); 1130 return recTy && recTy.getNumLenParams() > 0; 1131 } 1132 static bool isDerivedType(fir::BoxType boxTy) { 1133 return unwrapIfDerived(boxTy) != nullptr; 1134 } 1135 1136 // Get the element size and CFI type code of the boxed value. 1137 std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode( 1138 mlir::Location loc, mlir::ConversionPatternRewriter &rewriter, 1139 mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const { 1140 auto doInteger = 1141 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1142 int typeCode = fir::integerBitsToTypeCode(width); 1143 return {this->genConstantOffset(loc, rewriter, width / 8), 1144 this->genConstantOffset(loc, rewriter, typeCode)}; 1145 }; 1146 auto doLogical = 1147 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1148 int typeCode = fir::logicalBitsToTypeCode(width); 1149 return {this->genConstantOffset(loc, rewriter, width / 8), 1150 this->genConstantOffset(loc, rewriter, typeCode)}; 1151 }; 1152 auto doFloat = [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1153 int typeCode = fir::realBitsToTypeCode(width); 1154 return {this->genConstantOffset(loc, rewriter, width / 8), 1155 this->genConstantOffset(loc, rewriter, typeCode)}; 1156 }; 1157 auto doComplex = 1158 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1159 auto typeCode = fir::complexBitsToTypeCode(width); 1160 return {this->genConstantOffset(loc, rewriter, width / 8 * 2), 1161 this->genConstantOffset(loc, rewriter, typeCode)}; 1162 }; 1163 auto doCharacter = 1164 [&](unsigned width, 1165 mlir::Value len) -> std::tuple<mlir::Value, mlir::Value> { 1166 auto typeCode = fir::characterBitsToTypeCode(width); 1167 auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode); 1168 if (width == 8) 1169 return {len, typeCodeVal}; 1170 auto byteWidth = this->genConstantOffset(loc, rewriter, width / 8); 1171 auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64); 1172 auto size = 1173 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len); 1174 return {size, typeCodeVal}; 1175 }; 1176 auto getKindMap = [&]() -> fir::KindMapping & { 1177 return this->lowerTy().getKindMap(); 1178 }; 1179 // Pointer-like types. 1180 if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy)) 1181 boxEleTy = eleTy; 1182 // Integer types. 1183 if (fir::isa_integer(boxEleTy)) { 1184 if (auto ty = boxEleTy.dyn_cast<mlir::IntegerType>()) 1185 return doInteger(ty.getWidth()); 1186 auto ty = boxEleTy.cast<fir::IntegerType>(); 1187 return doInteger(getKindMap().getIntegerBitsize(ty.getFKind())); 1188 } 1189 // Floating point types. 1190 if (fir::isa_real(boxEleTy)) { 1191 if (auto ty = boxEleTy.dyn_cast<mlir::FloatType>()) 1192 return doFloat(ty.getWidth()); 1193 auto ty = boxEleTy.cast<fir::RealType>(); 1194 return doFloat(getKindMap().getRealBitsize(ty.getFKind())); 1195 } 1196 // Complex types. 1197 if (fir::isa_complex(boxEleTy)) { 1198 if (auto ty = boxEleTy.dyn_cast<mlir::ComplexType>()) 1199 return doComplex( 1200 ty.getElementType().cast<mlir::FloatType>().getWidth()); 1201 auto ty = boxEleTy.cast<fir::ComplexType>(); 1202 return doComplex(getKindMap().getRealBitsize(ty.getFKind())); 1203 } 1204 // Character types. 1205 if (auto ty = boxEleTy.dyn_cast<fir::CharacterType>()) { 1206 auto charWidth = getKindMap().getCharacterBitsize(ty.getFKind()); 1207 if (ty.getLen() != fir::CharacterType::unknownLen()) { 1208 auto len = this->genConstantOffset(loc, rewriter, ty.getLen()); 1209 return doCharacter(charWidth, len); 1210 } 1211 assert(!lenParams.empty()); 1212 return doCharacter(charWidth, lenParams.back()); 1213 } 1214 // Logical type. 1215 if (auto ty = boxEleTy.dyn_cast<fir::LogicalType>()) 1216 return doLogical(getKindMap().getLogicalBitsize(ty.getFKind())); 1217 // Array types. 1218 if (auto seqTy = boxEleTy.dyn_cast<fir::SequenceType>()) 1219 return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams); 1220 // Derived-type types. 1221 if (boxEleTy.isa<fir::RecordType>()) { 1222 auto ptrTy = mlir::LLVM::LLVMPointerType::get( 1223 this->lowerTy().convertType(boxEleTy)); 1224 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 1225 auto one = 1226 genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1); 1227 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, 1228 mlir::ValueRange{one}); 1229 auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>( 1230 loc, this->lowerTy().indexType(), gep); 1231 return {eleSize, 1232 this->genConstantOffset(loc, rewriter, fir::derivedToTypeCode())}; 1233 } 1234 // Reference type. 1235 if (fir::isa_ref_type(boxEleTy)) { 1236 // FIXME: use the target pointer size rather than sizeof(void*) 1237 return {this->genConstantOffset(loc, rewriter, sizeof(void *)), 1238 this->genConstantOffset(loc, rewriter, CFI_type_cptr)}; 1239 } 1240 fir::emitFatalError(loc, "unhandled type in fir.box code generation"); 1241 } 1242 1243 /// Basic pattern to write a field in the descriptor 1244 mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter, 1245 mlir::Location loc, mlir::Value dest, 1246 llvm::ArrayRef<unsigned> fldIndexes, 1247 mlir::Value value, bool bitcast = false) const { 1248 auto boxTy = dest.getType(); 1249 auto fldTy = this->getBoxEleTy(boxTy, fldIndexes); 1250 if (bitcast) 1251 value = rewriter.create<mlir::LLVM::BitcastOp>(loc, fldTy, value); 1252 else 1253 value = this->integerCast(loc, rewriter, fldTy, value); 1254 llvm::SmallVector<mlir::Attribute, 2> attrs; 1255 for (auto i : fldIndexes) 1256 attrs.push_back(rewriter.getI32IntegerAttr(i)); 1257 auto indexesAttr = mlir::ArrayAttr::get(rewriter.getContext(), attrs); 1258 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, boxTy, dest, value, 1259 indexesAttr); 1260 } 1261 1262 inline mlir::Value 1263 insertBaseAddress(mlir::ConversionPatternRewriter &rewriter, 1264 mlir::Location loc, mlir::Value dest, 1265 mlir::Value base) const { 1266 return insertField(rewriter, loc, dest, {kAddrPosInBox}, base, 1267 /*bitCast=*/true); 1268 } 1269 1270 inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter, 1271 mlir::Location loc, mlir::Value dest, 1272 unsigned dim, mlir::Value lb) const { 1273 return insertField(rewriter, loc, dest, 1274 {kDimsPosInBox, dim, kDimLowerBoundPos}, lb); 1275 } 1276 1277 inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter, 1278 mlir::Location loc, mlir::Value dest, 1279 unsigned dim, mlir::Value extent) const { 1280 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos}, 1281 extent); 1282 } 1283 1284 inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter, 1285 mlir::Location loc, mlir::Value dest, 1286 unsigned dim, mlir::Value stride) const { 1287 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos}, 1288 stride); 1289 } 1290 1291 /// Get the address of the type descriptor global variable that was created by 1292 /// lowering for derived type \p recType. 1293 template <typename BOX> 1294 mlir::Value 1295 getTypeDescriptor(BOX box, mlir::ConversionPatternRewriter &rewriter, 1296 mlir::Location loc, fir::RecordType recType) const { 1297 std::string name = 1298 fir::NameUniquer::getTypeDescriptorName(recType.getName()); 1299 auto module = box->template getParentOfType<mlir::ModuleOp>(); 1300 if (auto global = module.template lookupSymbol<fir::GlobalOp>(name)) { 1301 auto ty = mlir::LLVM::LLVMPointerType::get( 1302 this->lowerTy().convertType(global.getType())); 1303 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, 1304 global.getSymName()); 1305 } 1306 if (auto global = 1307 module.template lookupSymbol<mlir::LLVM::GlobalOp>(name)) { 1308 // The global may have already been translated to LLVM. 1309 auto ty = mlir::LLVM::LLVMPointerType::get(global.getType()); 1310 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, 1311 global.getSymName()); 1312 } 1313 // Type info derived types do not have type descriptors since they are the 1314 // types defining type descriptors. 1315 if (!this->options.ignoreMissingTypeDescriptors && 1316 !fir::NameUniquer::belongsToModule( 1317 name, Fortran::semantics::typeInfoBuiltinModule)) 1318 fir::emitFatalError( 1319 loc, "runtime derived type info descriptor was not generated"); 1320 return rewriter.create<mlir::LLVM::NullOp>( 1321 loc, ::getVoidPtrType(box.getContext())); 1322 } 1323 1324 template <typename BOX> 1325 std::tuple<fir::BoxType, mlir::Value, mlir::Value> 1326 consDescriptorPrefix(BOX box, mlir::ConversionPatternRewriter &rewriter, 1327 unsigned rank, mlir::ValueRange lenParams) const { 1328 auto loc = box.getLoc(); 1329 auto boxTy = box.getType().template dyn_cast<fir::BoxType>(); 1330 auto convTy = this->lowerTy().convertBoxType(boxTy, rank); 1331 auto llvmBoxPtrTy = convTy.template cast<mlir::LLVM::LLVMPointerType>(); 1332 auto llvmBoxTy = llvmBoxPtrTy.getElementType(); 1333 mlir::Value descriptor = 1334 rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy); 1335 1336 llvm::SmallVector<mlir::Value> typeparams = lenParams; 1337 if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) { 1338 if (!box.substr().empty() && fir::hasDynamicSize(boxTy.getEleTy())) 1339 typeparams.push_back(box.substr()[1]); 1340 } 1341 1342 // Write each of the fields with the appropriate values 1343 auto [eleSize, cfiTy] = 1344 getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams); 1345 descriptor = 1346 insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize); 1347 descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox}, 1348 this->genI32Constant(loc, rewriter, CFI_VERSION)); 1349 descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox}, 1350 this->genI32Constant(loc, rewriter, rank)); 1351 descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy); 1352 descriptor = 1353 insertField(rewriter, loc, descriptor, {kAttributePosInBox}, 1354 this->genI32Constant(loc, rewriter, getCFIAttr(boxTy))); 1355 const bool hasAddendum = isDerivedType(boxTy); 1356 descriptor = 1357 insertField(rewriter, loc, descriptor, {kF18AddendumPosInBox}, 1358 this->genI32Constant(loc, rewriter, hasAddendum ? 1 : 0)); 1359 1360 if (hasAddendum) { 1361 auto isArray = 1362 fir::dyn_cast_ptrOrBoxEleTy(boxTy).template isa<fir::SequenceType>(); 1363 unsigned typeDescFieldId = isArray ? kOptTypePtrPosInBox : kDimsPosInBox; 1364 auto typeDesc = 1365 getTypeDescriptor(box, rewriter, loc, unwrapIfDerived(boxTy)); 1366 descriptor = 1367 insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc, 1368 /*bitCast=*/true); 1369 } 1370 1371 return {boxTy, descriptor, eleSize}; 1372 } 1373 1374 /// Compute the base address of a substring given the base address of a scalar 1375 /// string and the zero based string lower bound. 1376 mlir::Value shiftSubstringBase(mlir::ConversionPatternRewriter &rewriter, 1377 mlir::Location loc, mlir::Value base, 1378 mlir::Value lowerBound) const { 1379 llvm::SmallVector<mlir::Value> gepOperands; 1380 auto baseType = 1381 base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType(); 1382 if (baseType.isa<mlir::LLVM::LLVMArrayType>()) { 1383 auto idxTy = this->lowerTy().indexType(); 1384 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 1385 gepOperands.push_back(zero); 1386 } 1387 gepOperands.push_back(lowerBound); 1388 return this->genGEP(loc, base.getType(), rewriter, base, gepOperands); 1389 } 1390 1391 /// If the embox is not in a globalOp body, allocate storage for the box; 1392 /// store the value inside and return the generated alloca. Return the input 1393 /// value otherwise. 1394 mlir::Value 1395 placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter, 1396 mlir::Location loc, mlir::Value boxValue) const { 1397 auto *thisBlock = rewriter.getInsertionBlock(); 1398 if (thisBlock && mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp())) 1399 return boxValue; 1400 auto boxPtrTy = mlir::LLVM::LLVMPointerType::get(boxValue.getType()); 1401 auto alloca = genAllocaWithType(loc, boxPtrTy, defaultAlign, rewriter); 1402 rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, alloca); 1403 return alloca; 1404 } 1405 }; 1406 1407 /// Compute the extent of a triplet slice (lb:ub:step). 1408 static mlir::Value 1409 computeTripletExtent(mlir::ConversionPatternRewriter &rewriter, 1410 mlir::Location loc, mlir::Value lb, mlir::Value ub, 1411 mlir::Value step, mlir::Value zero, mlir::Type type) { 1412 mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb); 1413 extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step); 1414 extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step); 1415 // If the resulting extent is negative (`ub-lb` and `step` have different 1416 // signs), zero must be returned instead. 1417 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 1418 loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero); 1419 return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero); 1420 } 1421 1422 /// Create a generic box on a memory reference. This conversions lowers the 1423 /// abstract box to the appropriate, initialized descriptor. 1424 struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> { 1425 using EmboxCommonConversion::EmboxCommonConversion; 1426 1427 mlir::LogicalResult 1428 matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor, 1429 mlir::ConversionPatternRewriter &rewriter) const override { 1430 assert(!embox.getShape() && "There should be no dims on this embox op"); 1431 auto [boxTy, dest, eleSize] = 1432 consDescriptorPrefix(embox, rewriter, /*rank=*/0, 1433 /*lenParams=*/adaptor.getOperands().drop_front(1)); 1434 dest = insertBaseAddress(rewriter, embox.getLoc(), dest, 1435 adaptor.getOperands()[0]); 1436 if (isDerivedTypeWithLenParams(boxTy)) { 1437 TODO(embox.getLoc(), 1438 "fir.embox codegen of derived with length parameters"); 1439 return mlir::failure(); 1440 } 1441 auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest); 1442 rewriter.replaceOp(embox, result); 1443 return mlir::success(); 1444 } 1445 }; 1446 1447 /// Create a generic box on a memory reference. 1448 struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> { 1449 using EmboxCommonConversion::EmboxCommonConversion; 1450 1451 mlir::LogicalResult 1452 matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor, 1453 mlir::ConversionPatternRewriter &rewriter) const override { 1454 auto [boxTy, dest, eleSize] = consDescriptorPrefix( 1455 xbox, rewriter, xbox.getOutRank(), 1456 adaptor.getOperands().drop_front(xbox.lenParamOffset())); 1457 // Generate the triples in the dims field of the descriptor 1458 mlir::ValueRange operands = adaptor.getOperands(); 1459 auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64); 1460 mlir::Value base = operands[0]; 1461 assert(!xbox.shape().empty() && "must have a shape"); 1462 unsigned shapeOffset = xbox.shapeOffset(); 1463 bool hasShift = !xbox.shift().empty(); 1464 unsigned shiftOffset = xbox.shiftOffset(); 1465 bool hasSlice = !xbox.slice().empty(); 1466 unsigned sliceOffset = xbox.sliceOffset(); 1467 mlir::Location loc = xbox.getLoc(); 1468 mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0); 1469 mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1); 1470 mlir::Value prevDim = integerCast(loc, rewriter, i64Ty, eleSize); 1471 mlir::Value prevPtrOff = one; 1472 mlir::Type eleTy = boxTy.getEleTy(); 1473 const unsigned rank = xbox.getRank(); 1474 llvm::SmallVector<mlir::Value> gepArgs; 1475 unsigned constRows = 0; 1476 mlir::Value ptrOffset = zero; 1477 if (auto memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType())) 1478 if (auto seqTy = memEleTy.dyn_cast<fir::SequenceType>()) { 1479 mlir::Type seqEleTy = seqTy.getEleTy(); 1480 // Adjust the element scaling factor if the element is a dependent type. 1481 if (fir::hasDynamicSize(seqEleTy)) { 1482 if (fir::isa_char(seqEleTy)) { 1483 assert(xbox.lenParams().size() == 1); 1484 prevPtrOff = integerCast(loc, rewriter, i64Ty, 1485 operands[xbox.lenParamOffset()]); 1486 } else if (seqEleTy.isa<fir::RecordType>()) { 1487 TODO(loc, "generate call to calculate size of PDT"); 1488 } else { 1489 return rewriter.notifyMatchFailure(xbox, "unexpected dynamic type"); 1490 } 1491 } else { 1492 constRows = seqTy.getConstantRows(); 1493 } 1494 } 1495 1496 bool hasSubcomp = !xbox.subcomponent().empty(); 1497 if (!xbox.substr().empty()) 1498 TODO(loc, "codegen of fir.embox with substring"); 1499 1500 mlir::Value stepExpr; 1501 if (hasSubcomp) { 1502 // We have a subcomponent. The step value needs to be the number of 1503 // bytes per element (which is a derived type). 1504 mlir::Type ty0 = base.getType(); 1505 [[maybe_unused]] auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 1506 assert(ptrTy && "expected pointer type"); 1507 mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()); 1508 assert(memEleTy && "expected fir pointer type"); 1509 auto seqTy = memEleTy.dyn_cast<fir::SequenceType>(); 1510 assert(seqTy && "expected sequence type"); 1511 mlir::Type seqEleTy = seqTy.getEleTy(); 1512 auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy)); 1513 stepExpr = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter); 1514 } 1515 1516 // Process the array subspace arguments (shape, shift, etc.), if any, 1517 // translating everything to values in the descriptor wherever the entity 1518 // has a dynamic array dimension. 1519 for (unsigned di = 0, descIdx = 0; di < rank; ++di) { 1520 mlir::Value extent = operands[shapeOffset]; 1521 mlir::Value outerExtent = extent; 1522 bool skipNext = false; 1523 if (hasSlice) { 1524 mlir::Value off = operands[sliceOffset]; 1525 mlir::Value adj = one; 1526 if (hasShift) 1527 adj = operands[shiftOffset]; 1528 auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj); 1529 if (constRows > 0) { 1530 gepArgs.push_back(ao); 1531 } else { 1532 auto dimOff = 1533 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff); 1534 ptrOffset = 1535 rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset); 1536 } 1537 if (mlir::isa_and_nonnull<fir::UndefOp>( 1538 xbox.slice()[3 * di + 1].getDefiningOp())) { 1539 // This dimension contains a scalar expression in the array slice op. 1540 // The dimension is loop invariant, will be dropped, and will not 1541 // appear in the descriptor. 1542 skipNext = true; 1543 } 1544 } 1545 if (!skipNext) { 1546 if (hasSlice) 1547 extent = computeTripletExtent(rewriter, loc, operands[sliceOffset], 1548 operands[sliceOffset + 1], 1549 operands[sliceOffset + 2], zero, i64Ty); 1550 // store lower bound (normally 0) for BIND(C) interoperability. 1551 mlir::Value lb = zero; 1552 const bool isaPointerOrAllocatable = 1553 eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>(); 1554 // Lower bound is defaults to 1 for POINTER, ALLOCATABLE, and 1555 // denormalized descriptors. 1556 if (isaPointerOrAllocatable || !normalizedLowerBound(xbox)) { 1557 lb = one; 1558 // If there is a shifted origin, and no fir.slice, and this is not 1559 // a normalized descriptor then use the value from the shift op as 1560 // the lower bound. 1561 if (hasShift && !(hasSlice || hasSubcomp)) { 1562 lb = operands[shiftOffset]; 1563 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1564 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1565 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, 1566 lb); 1567 } 1568 } 1569 dest = insertLowerBound(rewriter, loc, dest, descIdx, lb); 1570 1571 dest = insertExtent(rewriter, loc, dest, descIdx, extent); 1572 1573 // store step (scaled by shaped extent) 1574 1575 mlir::Value step = hasSubcomp ? stepExpr : prevDim; 1576 if (hasSlice) 1577 step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step, 1578 operands[sliceOffset + 2]); 1579 dest = insertStride(rewriter, loc, dest, descIdx, step); 1580 ++descIdx; 1581 } 1582 1583 // compute the stride and offset for the next natural dimension 1584 prevDim = 1585 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevDim, outerExtent); 1586 if (constRows == 0) 1587 prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff, 1588 outerExtent); 1589 else 1590 --constRows; 1591 1592 // increment iterators 1593 ++shapeOffset; 1594 if (hasShift) 1595 ++shiftOffset; 1596 if (hasSlice) 1597 sliceOffset += 3; 1598 } 1599 if (hasSlice || hasSubcomp || !xbox.substr().empty()) { 1600 llvm::SmallVector<mlir::Value> args = {ptrOffset}; 1601 args.append(gepArgs.rbegin(), gepArgs.rend()); 1602 if (hasSubcomp) { 1603 // For each field in the path add the offset to base via the args list. 1604 // In the most general case, some offsets must be computed since 1605 // they are not be known until runtime. 1606 if (fir::hasDynamicSize(fir::unwrapSequenceType( 1607 fir::unwrapPassByRefType(xbox.memref().getType())))) 1608 TODO(loc, "fir.embox codegen dynamic size component in derived type"); 1609 args.append(operands.begin() + xbox.subcomponentOffset(), 1610 operands.begin() + xbox.subcomponentOffset() + 1611 xbox.subcomponent().size()); 1612 } 1613 base = 1614 rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args); 1615 if (!xbox.substr().empty()) 1616 base = shiftSubstringBase(rewriter, loc, base, 1617 operands[xbox.substrOffset()]); 1618 } 1619 dest = insertBaseAddress(rewriter, loc, dest, base); 1620 if (isDerivedTypeWithLenParams(boxTy)) 1621 TODO(loc, "fir.embox codegen of derived with length parameters"); 1622 1623 mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest); 1624 rewriter.replaceOp(xbox, result); 1625 return mlir::success(); 1626 } 1627 1628 /// Return true if `xbox` has a normalized lower bounds attribute. A box value 1629 /// that is neither a POINTER nor an ALLOCATABLE should be normalized to a 1630 /// zero origin lower bound for interoperability with BIND(C). 1631 inline static bool normalizedLowerBound(fir::cg::XEmboxOp xbox) { 1632 return xbox->hasAttr(fir::getNormalizedLowerBoundAttrName()); 1633 } 1634 }; 1635 1636 /// Create a new box given a box reference. 1637 struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> { 1638 using EmboxCommonConversion::EmboxCommonConversion; 1639 1640 mlir::LogicalResult 1641 matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor, 1642 mlir::ConversionPatternRewriter &rewriter) const override { 1643 mlir::Location loc = rebox.getLoc(); 1644 mlir::Type idxTy = lowerTy().indexType(); 1645 mlir::Value loweredBox = adaptor.getOperands()[0]; 1646 mlir::ValueRange operands = adaptor.getOperands(); 1647 1648 // Create new descriptor and fill its non-shape related data. 1649 llvm::SmallVector<mlir::Value, 2> lenParams; 1650 mlir::Type inputEleTy = getInputEleTy(rebox); 1651 if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) { 1652 mlir::Value len = 1653 loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter); 1654 if (charTy.getFKind() != 1) { 1655 mlir::Value width = 1656 genConstantIndex(loc, idxTy, rewriter, charTy.getFKind()); 1657 len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width); 1658 } 1659 lenParams.emplace_back(len); 1660 } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) { 1661 if (recTy.getNumLenParams() != 0) 1662 TODO(loc, "reboxing descriptor of derived type with length parameters"); 1663 } 1664 auto [boxTy, dest, eleSize] = 1665 consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams); 1666 1667 // Read input extents, strides, and base address 1668 llvm::SmallVector<mlir::Value> inputExtents; 1669 llvm::SmallVector<mlir::Value> inputStrides; 1670 const unsigned inputRank = rebox.getRank(); 1671 for (unsigned i = 0; i < inputRank; ++i) { 1672 mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i); 1673 llvm::SmallVector<mlir::Value, 3> dimInfo = 1674 getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter); 1675 inputExtents.emplace_back(dimInfo[1]); 1676 inputStrides.emplace_back(dimInfo[2]); 1677 } 1678 1679 mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType()); 1680 mlir::Value baseAddr = 1681 loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter); 1682 1683 if (!rebox.slice().empty() || !rebox.subcomponent().empty()) 1684 return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1685 operands, rewriter); 1686 return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1687 operands, rewriter); 1688 } 1689 1690 private: 1691 /// Write resulting shape and base address in descriptor, and replace rebox 1692 /// op. 1693 mlir::LogicalResult 1694 finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1695 mlir::ValueRange lbounds, mlir::ValueRange extents, 1696 mlir::ValueRange strides, 1697 mlir::ConversionPatternRewriter &rewriter) const { 1698 mlir::Location loc = rebox.getLoc(); 1699 mlir::Value zero = 1700 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 1701 mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1); 1702 for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) { 1703 mlir::Value extent = std::get<0>(iter.value()); 1704 unsigned dim = iter.index(); 1705 mlir::Value lb = one; 1706 if (!lbounds.empty()) { 1707 lb = lbounds[dim]; 1708 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1709 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1710 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, lb); 1711 }; 1712 dest = insertLowerBound(rewriter, loc, dest, dim, lb); 1713 dest = insertExtent(rewriter, loc, dest, dim, extent); 1714 dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value())); 1715 } 1716 dest = insertBaseAddress(rewriter, loc, dest, base); 1717 mlir::Value result = 1718 placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest); 1719 rewriter.replaceOp(rebox, result); 1720 return mlir::success(); 1721 } 1722 1723 // Apply slice given the base address, extents and strides of the input box. 1724 mlir::LogicalResult 1725 sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1726 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1727 mlir::ValueRange operands, 1728 mlir::ConversionPatternRewriter &rewriter) const { 1729 mlir::Location loc = rebox.getLoc(); 1730 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1731 mlir::Type idxTy = lowerTy().indexType(); 1732 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 1733 // Apply subcomponent and substring shift on base address. 1734 if (!rebox.subcomponent().empty() || !rebox.substr().empty()) { 1735 // Cast to inputEleTy* so that a GEP can be used. 1736 mlir::Type inputEleTy = getInputEleTy(rebox); 1737 auto llvmElePtrTy = 1738 mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy)); 1739 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base); 1740 1741 if (!rebox.subcomponent().empty()) { 1742 llvm::SmallVector<mlir::Value> gepOperands = {zero}; 1743 for (unsigned i = 0; i < rebox.subcomponent().size(); ++i) 1744 gepOperands.push_back(operands[rebox.subcomponentOffset() + i]); 1745 base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands); 1746 } 1747 if (!rebox.substr().empty()) 1748 base = shiftSubstringBase(rewriter, loc, base, 1749 operands[rebox.substrOffset()]); 1750 } 1751 1752 if (rebox.slice().empty()) 1753 // The array section is of the form array[%component][substring], keep 1754 // the input array extents and strides. 1755 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1756 inputExtents, inputStrides, rewriter); 1757 1758 // Strides from the fir.box are in bytes. 1759 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1760 1761 // The slice is of the form array(i:j:k)[%component]. Compute new extents 1762 // and strides. 1763 llvm::SmallVector<mlir::Value> slicedExtents; 1764 llvm::SmallVector<mlir::Value> slicedStrides; 1765 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 1766 const bool sliceHasOrigins = !rebox.shift().empty(); 1767 unsigned sliceOps = rebox.sliceOffset(); 1768 unsigned shiftOps = rebox.shiftOffset(); 1769 auto strideOps = inputStrides.begin(); 1770 const unsigned inputRank = inputStrides.size(); 1771 for (unsigned i = 0; i < inputRank; 1772 ++i, ++strideOps, ++shiftOps, sliceOps += 3) { 1773 mlir::Value sliceLb = 1774 integerCast(loc, rewriter, idxTy, operands[sliceOps]); 1775 mlir::Value inputStride = *strideOps; // already idxTy 1776 // Apply origin shift: base += (lb-shift)*input_stride 1777 mlir::Value sliceOrigin = 1778 sliceHasOrigins 1779 ? integerCast(loc, rewriter, idxTy, operands[shiftOps]) 1780 : one; 1781 mlir::Value diff = 1782 rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin); 1783 mlir::Value offset = 1784 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride); 1785 base = genGEP(loc, voidPtrTy, rewriter, base, offset); 1786 // Apply upper bound and step if this is a triplet. Otherwise, the 1787 // dimension is dropped and no extents/strides are computed. 1788 mlir::Value upper = operands[sliceOps + 1]; 1789 const bool isTripletSlice = 1790 !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp()); 1791 if (isTripletSlice) { 1792 mlir::Value step = 1793 integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]); 1794 // extent = ub-lb+step/step 1795 mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper); 1796 mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb, 1797 sliceUb, step, zero, idxTy); 1798 slicedExtents.emplace_back(extent); 1799 // stride = step*input_stride 1800 mlir::Value stride = 1801 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride); 1802 slicedStrides.emplace_back(stride); 1803 } 1804 } 1805 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1806 slicedExtents, slicedStrides, rewriter); 1807 } 1808 1809 /// Apply a new shape to the data described by a box given the base address, 1810 /// extents and strides of the box. 1811 mlir::LogicalResult 1812 reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1813 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1814 mlir::ValueRange operands, 1815 mlir::ConversionPatternRewriter &rewriter) const { 1816 mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(), 1817 operands.begin() + rebox.shiftOffset() + 1818 rebox.shift().size()}; 1819 if (rebox.shape().empty()) { 1820 // Only setting new lower bounds. 1821 return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents, 1822 inputStrides, rewriter); 1823 } 1824 1825 mlir::Location loc = rebox.getLoc(); 1826 // Strides from the fir.box are in bytes. 1827 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1828 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1829 1830 llvm::SmallVector<mlir::Value> newStrides; 1831 llvm::SmallVector<mlir::Value> newExtents; 1832 mlir::Type idxTy = lowerTy().indexType(); 1833 // First stride from input box is kept. The rest is assumed contiguous 1834 // (it is not possible to reshape otherwise). If the input is scalar, 1835 // which may be OK if all new extents are ones, the stride does not 1836 // matter, use one. 1837 mlir::Value stride = inputStrides.empty() 1838 ? genConstantIndex(loc, idxTy, rewriter, 1) 1839 : inputStrides[0]; 1840 for (unsigned i = 0; i < rebox.shape().size(); ++i) { 1841 mlir::Value rawExtent = operands[rebox.shapeOffset() + i]; 1842 mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent); 1843 newExtents.emplace_back(extent); 1844 newStrides.emplace_back(stride); 1845 // nextStride = extent * stride; 1846 stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride); 1847 } 1848 return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides, 1849 rewriter); 1850 } 1851 1852 /// Return scalar element type of the input box. 1853 static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) { 1854 auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType()); 1855 if (auto seqTy = ty.dyn_cast<fir::SequenceType>()) 1856 return seqTy.getEleTy(); 1857 return ty; 1858 } 1859 }; 1860 1861 /// Lower `fir.emboxproc` operation. Creates a procedure box. 1862 /// TODO: Part of supporting Fortran 2003 procedure pointers. 1863 struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> { 1864 using FIROpConversion::FIROpConversion; 1865 1866 mlir::LogicalResult 1867 matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor, 1868 mlir::ConversionPatternRewriter &rewriter) const override { 1869 TODO(emboxproc.getLoc(), "fir.emboxproc codegen"); 1870 return mlir::failure(); 1871 } 1872 }; 1873 1874 // Code shared between insert_value and extract_value Ops. 1875 struct ValueOpCommon { 1876 // Translate the arguments pertaining to any multidimensional array to 1877 // row-major order for LLVM-IR. 1878 static void toRowMajor(llvm::SmallVectorImpl<mlir::Attribute> &attrs, 1879 mlir::Type ty) { 1880 assert(ty && "type is null"); 1881 const auto end = attrs.size(); 1882 for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) { 1883 if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 1884 const auto dim = getDimension(seq); 1885 if (dim > 1) { 1886 auto ub = std::min(i + dim, end); 1887 std::reverse(attrs.begin() + i, attrs.begin() + ub); 1888 i += dim - 1; 1889 } 1890 ty = getArrayElementType(seq); 1891 } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) { 1892 ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()]; 1893 } else { 1894 llvm_unreachable("index into invalid type"); 1895 } 1896 } 1897 } 1898 1899 static llvm::SmallVector<mlir::Attribute> 1900 collectIndices(mlir::ConversionPatternRewriter &rewriter, 1901 mlir::ArrayAttr arrAttr) { 1902 llvm::SmallVector<mlir::Attribute> attrs; 1903 for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) { 1904 if (i->isa<mlir::IntegerAttr>()) { 1905 attrs.push_back(*i); 1906 } else { 1907 auto fieldName = i->cast<mlir::StringAttr>().getValue(); 1908 ++i; 1909 auto ty = i->cast<mlir::TypeAttr>().getValue(); 1910 auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName); 1911 attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index)); 1912 } 1913 } 1914 return attrs; 1915 } 1916 1917 private: 1918 static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) { 1919 unsigned result = 1; 1920 for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>(); 1921 eleTy; 1922 eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>()) 1923 ++result; 1924 return result; 1925 } 1926 1927 static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) { 1928 auto eleTy = ty.getElementType(); 1929 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 1930 eleTy = arrTy.getElementType(); 1931 return eleTy; 1932 } 1933 }; 1934 1935 namespace { 1936 /// Extract a subobject value from an ssa-value of aggregate type 1937 struct ExtractValueOpConversion 1938 : public FIROpAndTypeConversion<fir::ExtractValueOp>, 1939 public ValueOpCommon { 1940 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1941 1942 mlir::LogicalResult 1943 doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor, 1944 mlir::ConversionPatternRewriter &rewriter) const override { 1945 auto attrs = collectIndices(rewriter, extractVal.getCoor()); 1946 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 1947 auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs); 1948 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>( 1949 extractVal, ty, adaptor.getOperands()[0], position); 1950 return mlir::success(); 1951 } 1952 }; 1953 1954 /// InsertValue is the generalized instruction for the composition of new 1955 /// aggregate type values. 1956 struct InsertValueOpConversion 1957 : public FIROpAndTypeConversion<fir::InsertValueOp>, 1958 public ValueOpCommon { 1959 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1960 1961 mlir::LogicalResult 1962 doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor, 1963 mlir::ConversionPatternRewriter &rewriter) const override { 1964 auto attrs = collectIndices(rewriter, insertVal.getCoor()); 1965 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 1966 auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs); 1967 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 1968 insertVal, ty, adaptor.getOperands()[0], adaptor.getOperands()[1], 1969 position); 1970 return mlir::success(); 1971 } 1972 }; 1973 1974 /// InsertOnRange inserts a value into a sequence over a range of offsets. 1975 struct InsertOnRangeOpConversion 1976 : public FIROpAndTypeConversion<fir::InsertOnRangeOp> { 1977 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1978 1979 // Increments an array of subscripts in a row major fasion. 1980 void incrementSubscripts(const llvm::SmallVector<uint64_t> &dims, 1981 llvm::SmallVector<uint64_t> &subscripts) const { 1982 for (size_t i = dims.size(); i > 0; --i) { 1983 if (++subscripts[i - 1] < dims[i - 1]) { 1984 return; 1985 } 1986 subscripts[i - 1] = 0; 1987 } 1988 } 1989 1990 mlir::LogicalResult 1991 doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor, 1992 mlir::ConversionPatternRewriter &rewriter) const override { 1993 1994 llvm::SmallVector<uint64_t> dims; 1995 auto type = adaptor.getOperands()[0].getType(); 1996 1997 // Iteratively extract the array dimensions from the type. 1998 while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 1999 dims.push_back(t.getNumElements()); 2000 type = t.getElementType(); 2001 } 2002 2003 llvm::SmallVector<uint64_t> lBounds; 2004 llvm::SmallVector<uint64_t> uBounds; 2005 2006 // Unzip the upper and lower bound and convert to a row major format. 2007 mlir::DenseIntElementsAttr coor = range.getCoor(); 2008 auto reversedCoor = llvm::reverse(coor.getValues<int64_t>()); 2009 for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) { 2010 uBounds.push_back(*i++); 2011 lBounds.push_back(*i); 2012 } 2013 2014 auto &subscripts = lBounds; 2015 auto loc = range.getLoc(); 2016 mlir::Value lastOp = adaptor.getOperands()[0]; 2017 mlir::Value insertVal = adaptor.getOperands()[1]; 2018 2019 auto i64Ty = rewriter.getI64Type(); 2020 while (subscripts != uBounds) { 2021 // Convert uint64_t's to Attribute's. 2022 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2023 for (const auto &subscript : subscripts) 2024 subscriptAttrs.push_back(mlir::IntegerAttr::get(i64Ty, subscript)); 2025 lastOp = rewriter.create<mlir::LLVM::InsertValueOp>( 2026 loc, ty, lastOp, insertVal, 2027 mlir::ArrayAttr::get(range.getContext(), subscriptAttrs)); 2028 2029 incrementSubscripts(dims, subscripts); 2030 } 2031 2032 // Convert uint64_t's to Attribute's. 2033 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2034 for (const auto &subscript : subscripts) 2035 subscriptAttrs.push_back( 2036 mlir::IntegerAttr::get(rewriter.getI64Type(), subscript)); 2037 mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs); 2038 2039 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 2040 range, ty, lastOp, insertVal, 2041 mlir::ArrayAttr::get(range.getContext(), arrayRef)); 2042 2043 return mlir::success(); 2044 } 2045 }; 2046 } // namespace 2047 2048 namespace { 2049 /// XArrayCoor is the address arithmetic on a dynamically shaped, sliced, 2050 /// shifted etc. array. 2051 /// (See the static restriction on coordinate_of.) array_coor determines the 2052 /// coordinate (location) of a specific element. 2053 struct XArrayCoorOpConversion 2054 : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> { 2055 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2056 2057 mlir::LogicalResult 2058 doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor, 2059 mlir::ConversionPatternRewriter &rewriter) const override { 2060 auto loc = coor.getLoc(); 2061 mlir::ValueRange operands = adaptor.getOperands(); 2062 unsigned rank = coor.getRank(); 2063 assert(coor.indices().size() == rank); 2064 assert(coor.shape().empty() || coor.shape().size() == rank); 2065 assert(coor.shift().empty() || coor.shift().size() == rank); 2066 assert(coor.slice().empty() || coor.slice().size() == 3 * rank); 2067 mlir::Type idxTy = lowerTy().indexType(); 2068 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2069 mlir::Value prevExt = one; 2070 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2071 mlir::Value offset = zero; 2072 const bool isShifted = !coor.shift().empty(); 2073 const bool isSliced = !coor.slice().empty(); 2074 const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>(); 2075 2076 // For each dimension of the array, generate the offset calculation. 2077 for (unsigned i = 0; i < rank; ++i) { 2078 mlir::Value index = 2079 integerCast(loc, rewriter, idxTy, operands[coor.indicesOffset() + i]); 2080 mlir::Value lb = isShifted ? integerCast(loc, rewriter, idxTy, 2081 operands[coor.shiftOffset() + i]) 2082 : one; 2083 mlir::Value step = one; 2084 bool normalSlice = isSliced; 2085 // Compute zero based index in dimension i of the element, applying 2086 // potential triplets and lower bounds. 2087 if (isSliced) { 2088 mlir::Value ub = operands[coor.sliceOffset() + i + 1]; 2089 normalSlice = !mlir::isa_and_nonnull<fir::UndefOp>(ub.getDefiningOp()); 2090 if (normalSlice) 2091 step = integerCast(loc, rewriter, idxTy, 2092 operands[coor.sliceOffset() + i + 2]); 2093 } 2094 auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb); 2095 mlir::Value diff = 2096 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step); 2097 if (normalSlice) { 2098 mlir::Value sliceLb = 2099 integerCast(loc, rewriter, idxTy, operands[coor.sliceOffset() + i]); 2100 auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb); 2101 diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj); 2102 } 2103 // Update the offset given the stride and the zero based index `diff` 2104 // that was just computed. 2105 if (baseIsBoxed) { 2106 // Use stride in bytes from the descriptor. 2107 mlir::Value stride = 2108 loadStrideFromBox(loc, adaptor.getOperands()[0], i, rewriter); 2109 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride); 2110 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2111 } else { 2112 // Use stride computed at last iteration. 2113 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt); 2114 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2115 // Compute next stride assuming contiguity of the base array 2116 // (in element number). 2117 auto nextExt = 2118 integerCast(loc, rewriter, idxTy, operands[coor.shapeOffset() + i]); 2119 prevExt = 2120 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt); 2121 } 2122 } 2123 2124 // Add computed offset to the base address. 2125 if (baseIsBoxed) { 2126 // Working with byte offsets. The base address is read from the fir.box. 2127 // and need to be casted to i8* to do the pointer arithmetic. 2128 mlir::Type baseTy = 2129 getBaseAddrTypeFromBox(adaptor.getOperands()[0].getType()); 2130 mlir::Value base = 2131 loadBaseAddrFromBox(loc, baseTy, adaptor.getOperands()[0], rewriter); 2132 mlir::Type voidPtrTy = getVoidPtrType(); 2133 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2134 llvm::SmallVector<mlir::Value> args{offset}; 2135 auto addr = 2136 rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args); 2137 if (coor.subcomponent().empty()) { 2138 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, baseTy, addr); 2139 return mlir::success(); 2140 } 2141 auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr); 2142 args.clear(); 2143 args.push_back(zero); 2144 if (!coor.lenParams().empty()) { 2145 // If type parameters are present, then we don't want to use a GEPOp 2146 // as below, as the LLVM struct type cannot be statically defined. 2147 TODO(loc, "derived type with type parameters"); 2148 } 2149 // TODO: array offset subcomponents must be converted to LLVM's 2150 // row-major layout here. 2151 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2152 args.push_back(operands[i]); 2153 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, baseTy, casted, 2154 args); 2155 return mlir::success(); 2156 } 2157 2158 // The array was not boxed, so it must be contiguous. offset is therefore an 2159 // element offset and the base type is kept in the GEP unless the element 2160 // type size is itself dynamic. 2161 mlir::Value base; 2162 if (coor.subcomponent().empty()) { 2163 // No subcomponent. 2164 if (!coor.lenParams().empty()) { 2165 // Type parameters. Adjust element size explicitly. 2166 auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType()); 2167 assert(eleTy && "result must be a reference-like type"); 2168 if (fir::characterWithDynamicLen(eleTy)) { 2169 assert(coor.lenParams().size() == 1); 2170 auto bitsInChar = lowerTy().getKindMap().getCharacterBitsize( 2171 eleTy.cast<fir::CharacterType>().getFKind()); 2172 auto scaling = genConstantIndex(loc, idxTy, rewriter, bitsInChar / 8); 2173 auto scaledBySize = 2174 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, scaling); 2175 auto length = 2176 integerCast(loc, rewriter, idxTy, 2177 adaptor.getOperands()[coor.lenParamsOffset()]); 2178 offset = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, scaledBySize, 2179 length); 2180 } else { 2181 TODO(loc, "compute size of derived type with type parameters"); 2182 } 2183 } 2184 // Cast the base address to a pointer to T. 2185 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty, 2186 adaptor.getOperands()[0]); 2187 } else { 2188 // Operand #0 must have a pointer type. For subcomponent slicing, we 2189 // want to cast away the array type and have a plain struct type. 2190 mlir::Type ty0 = adaptor.getOperands()[0].getType(); 2191 auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 2192 assert(ptrTy && "expected pointer type"); 2193 mlir::Type eleTy = ptrTy.getElementType(); 2194 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2195 eleTy = arrTy.getElementType(); 2196 auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy); 2197 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy, 2198 adaptor.getOperands()[0]); 2199 } 2200 llvm::SmallVector<mlir::Value> args = {offset}; 2201 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2202 args.push_back(operands[i]); 2203 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args); 2204 return mlir::success(); 2205 } 2206 }; 2207 } // namespace 2208 2209 /// Convert to (memory) reference to a reference to a subobject. 2210 /// The coordinate_of op is a Swiss army knife operation that can be used on 2211 /// (memory) references to records, arrays, complex, etc. as well as boxes. 2212 /// With unboxed arrays, there is the restriction that the array have a static 2213 /// shape in all but the last column. 2214 struct CoordinateOpConversion 2215 : public FIROpAndTypeConversion<fir::CoordinateOp> { 2216 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2217 2218 mlir::LogicalResult 2219 doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor, 2220 mlir::ConversionPatternRewriter &rewriter) const override { 2221 mlir::ValueRange operands = adaptor.getOperands(); 2222 2223 mlir::Location loc = coor.getLoc(); 2224 mlir::Value base = operands[0]; 2225 mlir::Type baseObjectTy = coor.getBaseType(); 2226 mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2227 assert(objectTy && "fir.coordinate_of expects a reference type"); 2228 2229 // Complex type - basically, extract the real or imaginary part 2230 if (fir::isa_complex(objectTy)) { 2231 mlir::LLVM::ConstantOp c0 = 2232 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2233 llvm::SmallVector<mlir::Value> offs = {c0, operands[1]}; 2234 mlir::Value gep = genGEP(loc, ty, rewriter, base, offs); 2235 rewriter.replaceOp(coor, gep); 2236 return mlir::success(); 2237 } 2238 2239 // Boxed type - get the base pointer from the box 2240 if (baseObjectTy.dyn_cast<fir::BoxType>()) 2241 return doRewriteBox(coor, ty, operands, loc, rewriter); 2242 2243 // Reference, pointer or a heap type 2244 if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType, fir::HeapType>()) 2245 return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter); 2246 2247 return rewriter.notifyMatchFailure( 2248 coor, "fir.coordinate_of base operand has unsupported type"); 2249 } 2250 2251 static unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) { 2252 return fir::hasDynamicSize(ty) 2253 ? op.getDefiningOp() 2254 ->getAttrOfType<mlir::IntegerAttr>("field") 2255 .getInt() 2256 : getIntValue(op); 2257 } 2258 2259 static int64_t getIntValue(mlir::Value val) { 2260 assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value"); 2261 mlir::Operation *defop = val.getDefiningOp(); 2262 2263 if (auto constOp = mlir::dyn_cast<mlir::arith::ConstantIntOp>(defop)) 2264 return constOp.value(); 2265 if (auto llConstOp = mlir::dyn_cast<mlir::LLVM::ConstantOp>(defop)) 2266 if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>()) 2267 return attr.getValue().getSExtValue(); 2268 fir::emitFatalError(val.getLoc(), "must be a constant"); 2269 } 2270 2271 static bool hasSubDimensions(mlir::Type type) { 2272 return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>(); 2273 } 2274 2275 /// Check whether this form of `!fir.coordinate_of` is supported. These 2276 /// additional checks are required, because we are not yet able to convert 2277 /// all valid forms of `!fir.coordinate_of`. 2278 /// TODO: Either implement the unsupported cases or extend the verifier 2279 /// in FIROps.cpp instead. 2280 static bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) { 2281 const std::size_t numOfCoors = coors.size(); 2282 std::size_t i = 0; 2283 bool subEle = false; 2284 bool ptrEle = false; 2285 for (; i < numOfCoors; ++i) { 2286 mlir::Value nxtOpnd = coors[i]; 2287 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2288 subEle = true; 2289 i += arrTy.getDimension() - 1; 2290 type = arrTy.getEleTy(); 2291 } else if (auto recTy = type.dyn_cast<fir::RecordType>()) { 2292 subEle = true; 2293 type = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2294 } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) { 2295 subEle = true; 2296 type = tupTy.getType(getIntValue(nxtOpnd)); 2297 } else { 2298 ptrEle = true; 2299 } 2300 } 2301 if (ptrEle) 2302 return (!subEle) && (numOfCoors == 1); 2303 return subEle && (i >= numOfCoors); 2304 } 2305 2306 /// Walk the abstract memory layout and determine if the path traverses any 2307 /// array types with unknown shape. Return true iff all the array types have a 2308 /// constant shape along the path. 2309 static bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) { 2310 for (std::size_t i = 0, sz = coors.size(); i < sz; ++i) { 2311 mlir::Value nxtOpnd = coors[i]; 2312 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2313 if (fir::sequenceWithNonConstantShape(arrTy)) 2314 return false; 2315 i += arrTy.getDimension() - 1; 2316 type = arrTy.getEleTy(); 2317 } else if (auto strTy = type.dyn_cast<fir::RecordType>()) { 2318 type = strTy.getType(getFieldNumber(strTy, nxtOpnd)); 2319 } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) { 2320 type = strTy.getType(getIntValue(nxtOpnd)); 2321 } else { 2322 return true; 2323 } 2324 } 2325 return true; 2326 } 2327 2328 private: 2329 mlir::LogicalResult 2330 doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands, 2331 mlir::Location loc, 2332 mlir::ConversionPatternRewriter &rewriter) const { 2333 mlir::Type boxObjTy = coor.getBaseType(); 2334 assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`"); 2335 2336 mlir::Value boxBaseAddr = operands[0]; 2337 2338 // 1. SPECIAL CASE (uses `fir.len_param_index`): 2339 // %box = ... : !fir.box<!fir.type<derived{len1:i32}>> 2340 // %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}> 2341 // %addr = coordinate_of %box, %lenp 2342 if (coor.getNumOperands() == 2) { 2343 mlir::Operation *coordinateDef = 2344 (*coor.getCoor().begin()).getDefiningOp(); 2345 if (mlir::isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef)) 2346 TODO(loc, 2347 "fir.coordinate_of - fir.len_param_index is not supported yet"); 2348 } 2349 2350 // 2. GENERAL CASE: 2351 // 2.1. (`fir.array`) 2352 // %box = ... : !fix.box<!fir.array<?xU>> 2353 // %idx = ... : index 2354 // %resultAddr = coordinate_of %box, %idx : !fir.ref<U> 2355 // 2.2 (`fir.derived`) 2356 // %box = ... : !fix.box<!fir.type<derived_type{field_1:i32}>> 2357 // %idx = ... : i32 2358 // %resultAddr = coordinate_of %box, %idx : !fir.ref<i32> 2359 // 2.3 (`fir.derived` inside `fir.array`) 2360 // %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32, 2361 // field_2:f32}>>> %idx1 = ... : index %idx2 = ... : i32 %resultAddr = 2362 // coordinate_of %box, %idx1, %idx2 : !fir.ref<f32> 2363 // 2.4. TODO: Either document or disable any other case that the following 2364 // implementation might convert. 2365 mlir::LLVM::ConstantOp c0 = 2366 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2367 mlir::Value resultAddr = 2368 loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()), 2369 boxBaseAddr, rewriter); 2370 // Component Type 2371 auto cpnTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy); 2372 mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext()); 2373 2374 for (unsigned i = 1, last = operands.size(); i < last; ++i) { 2375 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2376 if (i != 1) 2377 TODO(loc, "fir.array nested inside other array and/or derived type"); 2378 // Applies byte strides from the box. Ignore lower bound from box 2379 // since fir.coordinate_of indexes are zero based. Lowering takes care 2380 // of lower bound aspects. This both accounts for dynamically sized 2381 // types and non contiguous arrays. 2382 auto idxTy = lowerTy().indexType(); 2383 mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0); 2384 for (unsigned index = i, lastIndex = i + arrTy.getDimension(); 2385 index < lastIndex; ++index) { 2386 mlir::Value stride = 2387 loadStrideFromBox(loc, operands[0], index - i, rewriter); 2388 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, 2389 operands[index], stride); 2390 off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off); 2391 } 2392 auto voidPtrBase = 2393 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr); 2394 llvm::SmallVector<mlir::Value> args{off}; 2395 resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, 2396 voidPtrBase, args); 2397 i += arrTy.getDimension() - 1; 2398 cpnTy = arrTy.getEleTy(); 2399 } else if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) { 2400 auto recRefTy = 2401 mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy)); 2402 mlir::Value nxtOpnd = operands[i]; 2403 auto memObj = 2404 rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr); 2405 llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd}; 2406 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2407 auto llvmCurrentObjTy = lowerTy().convertType(cpnTy); 2408 auto gep = rewriter.create<mlir::LLVM::GEPOp>( 2409 loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj, 2410 args); 2411 resultAddr = 2412 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep); 2413 } else { 2414 fir::emitFatalError(loc, "unexpected type in coordinate_of"); 2415 } 2416 } 2417 2418 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr); 2419 return mlir::success(); 2420 } 2421 2422 mlir::LogicalResult 2423 doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty, 2424 mlir::ValueRange operands, mlir::Location loc, 2425 mlir::ConversionPatternRewriter &rewriter) const { 2426 mlir::Type baseObjectTy = coor.getBaseType(); 2427 2428 // Component Type 2429 mlir::Type cpnTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2430 bool hasSubdimension = hasSubDimensions(cpnTy); 2431 bool columnIsDeferred = !hasSubdimension; 2432 2433 if (!supportedCoordinate(cpnTy, operands.drop_front(1))) 2434 TODO(loc, "unsupported combination of coordinate operands"); 2435 2436 const bool hasKnownShape = 2437 arraysHaveKnownShape(cpnTy, operands.drop_front(1)); 2438 2439 // If only the column is `?`, then we can simply place the column value in 2440 // the 0-th GEP position. 2441 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2442 if (!hasKnownShape) { 2443 const unsigned sz = arrTy.getDimension(); 2444 if (arraysHaveKnownShape(arrTy.getEleTy(), 2445 operands.drop_front(1 + sz))) { 2446 fir::SequenceType::ShapeRef shape = arrTy.getShape(); 2447 bool allConst = true; 2448 for (unsigned i = 0; i < sz - 1; ++i) { 2449 if (shape[i] < 0) { 2450 allConst = false; 2451 break; 2452 } 2453 } 2454 if (allConst) 2455 columnIsDeferred = true; 2456 } 2457 } 2458 } 2459 2460 if (fir::hasDynamicSize(fir::unwrapSequenceType(cpnTy))) 2461 return mlir::emitError( 2462 loc, "fir.coordinate_of with a dynamic element size is unsupported"); 2463 2464 if (hasKnownShape || columnIsDeferred) { 2465 llvm::SmallVector<mlir::Value> offs; 2466 if (hasKnownShape && hasSubdimension) { 2467 mlir::LLVM::ConstantOp c0 = 2468 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2469 offs.push_back(c0); 2470 } 2471 llvm::Optional<int> dims; 2472 llvm::SmallVector<mlir::Value> arrIdx; 2473 for (std::size_t i = 1, sz = operands.size(); i < sz; ++i) { 2474 mlir::Value nxtOpnd = operands[i]; 2475 2476 if (!cpnTy) 2477 return mlir::emitError(loc, "invalid coordinate/check failed"); 2478 2479 // check if the i-th coordinate relates to an array 2480 if (dims.hasValue()) { 2481 arrIdx.push_back(nxtOpnd); 2482 int dimsLeft = *dims; 2483 if (dimsLeft > 1) { 2484 dims = dimsLeft - 1; 2485 continue; 2486 } 2487 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2488 // append array range in reverse (FIR arrays are column-major) 2489 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2490 arrIdx.clear(); 2491 dims.reset(); 2492 continue; 2493 } 2494 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2495 int d = arrTy.getDimension() - 1; 2496 if (d > 0) { 2497 dims = d; 2498 arrIdx.push_back(nxtOpnd); 2499 continue; 2500 } 2501 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2502 offs.push_back(nxtOpnd); 2503 continue; 2504 } 2505 2506 // check if the i-th coordinate relates to a field 2507 if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) 2508 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2509 else if (auto tupTy = cpnTy.dyn_cast<mlir::TupleType>()) 2510 cpnTy = tupTy.getType(getIntValue(nxtOpnd)); 2511 else 2512 cpnTy = nullptr; 2513 2514 offs.push_back(nxtOpnd); 2515 } 2516 if (dims.hasValue()) 2517 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2518 mlir::Value base = operands[0]; 2519 mlir::Value retval = genGEP(loc, ty, rewriter, base, offs); 2520 rewriter.replaceOp(coor, retval); 2521 return mlir::success(); 2522 } 2523 2524 return mlir::emitError( 2525 loc, "fir.coordinate_of base operand has unsupported type"); 2526 } 2527 }; 2528 2529 /// Convert `fir.field_index`. The conversion depends on whether the size of 2530 /// the record is static or dynamic. 2531 struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> { 2532 using FIROpConversion::FIROpConversion; 2533 2534 // NB: most field references should be resolved by this point 2535 mlir::LogicalResult 2536 matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor, 2537 mlir::ConversionPatternRewriter &rewriter) const override { 2538 auto recTy = field.getOnType().cast<fir::RecordType>(); 2539 unsigned index = recTy.getFieldIndex(field.getFieldId()); 2540 2541 if (!fir::hasDynamicSize(recTy)) { 2542 // Derived type has compile-time constant layout. Return index of the 2543 // component type in the parent type (to be used in GEP). 2544 rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset( 2545 field.getLoc(), rewriter, index)}); 2546 return mlir::success(); 2547 } 2548 2549 // Derived type has compile-time constant layout. Call the compiler 2550 // generated function to determine the byte offset of the field at runtime. 2551 // This returns a non-constant. 2552 mlir::FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get( 2553 field.getContext(), getOffsetMethodName(recTy, field.getFieldId())); 2554 mlir::NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr); 2555 mlir::NamedAttribute fieldAttr = rewriter.getNamedAttr( 2556 "field", mlir::IntegerAttr::get(lowerTy().indexType(), index)); 2557 rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>( 2558 field, lowerTy().offsetType(), adaptor.getOperands(), 2559 llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr}); 2560 return mlir::success(); 2561 } 2562 2563 // Re-Construct the name of the compiler generated method that calculates the 2564 // offset 2565 inline static std::string getOffsetMethodName(fir::RecordType recTy, 2566 llvm::StringRef field) { 2567 return recTy.getName().str() + "P." + field.str() + ".offset"; 2568 } 2569 }; 2570 2571 /// Convert `fir.end` 2572 struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> { 2573 using FIROpConversion::FIROpConversion; 2574 2575 mlir::LogicalResult 2576 matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor, 2577 mlir::ConversionPatternRewriter &rewriter) const override { 2578 TODO(firEnd.getLoc(), "fir.end codegen"); 2579 return mlir::failure(); 2580 } 2581 }; 2582 2583 /// Lower `fir.gentypedesc` to a global constant. 2584 struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> { 2585 using FIROpConversion::FIROpConversion; 2586 2587 mlir::LogicalResult 2588 matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor, 2589 mlir::ConversionPatternRewriter &rewriter) const override { 2590 TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen"); 2591 return mlir::failure(); 2592 } 2593 }; 2594 2595 /// Lower `fir.has_value` operation to `llvm.return` operation. 2596 struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> { 2597 using FIROpConversion::FIROpConversion; 2598 2599 mlir::LogicalResult 2600 matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor, 2601 mlir::ConversionPatternRewriter &rewriter) const override { 2602 rewriter.replaceOpWithNewOp<mlir::LLVM::ReturnOp>(op, 2603 adaptor.getOperands()); 2604 return mlir::success(); 2605 } 2606 }; 2607 2608 /// Lower `fir.global` operation to `llvm.global` operation. 2609 /// `fir.insert_on_range` operations are replaced with constant dense attribute 2610 /// if they are applied on the full range. 2611 struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> { 2612 using FIROpConversion::FIROpConversion; 2613 2614 mlir::LogicalResult 2615 matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor, 2616 mlir::ConversionPatternRewriter &rewriter) const override { 2617 auto tyAttr = convertType(global.getType()); 2618 if (global.getType().isa<fir::BoxType>()) 2619 tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType(); 2620 auto loc = global.getLoc(); 2621 mlir::Attribute initAttr{}; 2622 if (global.getInitVal()) 2623 initAttr = global.getInitVal().getValue(); 2624 auto linkage = convertLinkage(global.getLinkName()); 2625 auto isConst = global.getConstant().hasValue(); 2626 auto g = rewriter.create<mlir::LLVM::GlobalOp>( 2627 loc, tyAttr, isConst, linkage, global.getSymName(), initAttr); 2628 auto &gr = g.getInitializerRegion(); 2629 rewriter.inlineRegionBefore(global.getRegion(), gr, gr.end()); 2630 if (!gr.empty()) { 2631 // Replace insert_on_range with a constant dense attribute if the 2632 // initialization is on the full range. 2633 auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>(); 2634 for (auto insertOp : insertOnRangeOps) { 2635 if (isFullRange(insertOp.getCoor(), insertOp.getType())) { 2636 auto seqTyAttr = convertType(insertOp.getType()); 2637 auto *op = insertOp.getVal().getDefiningOp(); 2638 auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op); 2639 if (!constant) { 2640 auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op); 2641 if (!convertOp) 2642 continue; 2643 constant = mlir::cast<mlir::arith::ConstantOp>( 2644 convertOp.getValue().getDefiningOp()); 2645 } 2646 mlir::Type vecType = mlir::VectorType::get( 2647 insertOp.getType().getShape(), constant.getType()); 2648 auto denseAttr = mlir::DenseElementsAttr::get( 2649 vecType.cast<mlir::ShapedType>(), constant.getValue()); 2650 rewriter.setInsertionPointAfter(insertOp); 2651 rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>( 2652 insertOp, seqTyAttr, denseAttr); 2653 } 2654 } 2655 } 2656 rewriter.eraseOp(global); 2657 return mlir::success(); 2658 } 2659 2660 bool isFullRange(mlir::DenseIntElementsAttr indexes, 2661 fir::SequenceType seqTy) const { 2662 auto extents = seqTy.getShape(); 2663 if (indexes.size() / 2 != static_cast<int64_t>(extents.size())) 2664 return false; 2665 auto cur_index = indexes.value_begin<int64_t>(); 2666 for (unsigned i = 0; i < indexes.size(); i += 2) { 2667 if (*(cur_index++) != 0) 2668 return false; 2669 if (*(cur_index++) != extents[i / 2] - 1) 2670 return false; 2671 } 2672 return true; 2673 } 2674 2675 // TODO: String comparaison should be avoided. Replace linkName with an 2676 // enumeration. 2677 mlir::LLVM::Linkage 2678 convertLinkage(llvm::Optional<llvm::StringRef> optLinkage) const { 2679 if (optLinkage.hasValue()) { 2680 auto name = optLinkage.getValue(); 2681 if (name == "internal") 2682 return mlir::LLVM::Linkage::Internal; 2683 if (name == "linkonce") 2684 return mlir::LLVM::Linkage::Linkonce; 2685 if (name == "linkonce_odr") 2686 return mlir::LLVM::Linkage::LinkonceODR; 2687 if (name == "common") 2688 return mlir::LLVM::Linkage::Common; 2689 if (name == "weak") 2690 return mlir::LLVM::Linkage::Weak; 2691 } 2692 return mlir::LLVM::Linkage::External; 2693 } 2694 }; 2695 2696 /// `fir.load` --> `llvm.load` 2697 struct LoadOpConversion : public FIROpConversion<fir::LoadOp> { 2698 using FIROpConversion::FIROpConversion; 2699 2700 mlir::LogicalResult 2701 matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor, 2702 mlir::ConversionPatternRewriter &rewriter) const override { 2703 // fir.box is a special case because it is considered as an ssa values in 2704 // fir, but it is lowered as a pointer to a descriptor. So fir.ref<fir.box> 2705 // and fir.box end up being the same llvm types and loading a 2706 // fir.ref<fir.box> is actually a no op in LLVM. 2707 if (load.getType().isa<fir::BoxType>()) { 2708 rewriter.replaceOp(load, adaptor.getOperands()[0]); 2709 } else { 2710 rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>( 2711 load, convertType(load.getType()), adaptor.getOperands(), 2712 load->getAttrs()); 2713 } 2714 return mlir::success(); 2715 } 2716 }; 2717 2718 /// Lower `fir.no_reassoc` to LLVM IR dialect. 2719 /// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast 2720 /// math flags? 2721 struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> { 2722 using FIROpConversion::FIROpConversion; 2723 2724 mlir::LogicalResult 2725 matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor, 2726 mlir::ConversionPatternRewriter &rewriter) const override { 2727 rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]); 2728 return mlir::success(); 2729 } 2730 }; 2731 2732 static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest, 2733 llvm::Optional<mlir::ValueRange> destOps, 2734 mlir::ConversionPatternRewriter &rewriter, 2735 mlir::Block *newBlock) { 2736 if (destOps.hasValue()) 2737 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(), 2738 newBlock, mlir::ValueRange()); 2739 else 2740 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock); 2741 } 2742 2743 template <typename A, typename B> 2744 static void genBrOp(A caseOp, mlir::Block *dest, llvm::Optional<B> destOps, 2745 mlir::ConversionPatternRewriter &rewriter) { 2746 if (destOps.hasValue()) 2747 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(), 2748 dest); 2749 else 2750 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest); 2751 } 2752 2753 static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp, 2754 mlir::Block *dest, 2755 llvm::Optional<mlir::ValueRange> destOps, 2756 mlir::ConversionPatternRewriter &rewriter) { 2757 auto *thisBlock = rewriter.getInsertionBlock(); 2758 auto *newBlock = createBlock(rewriter, dest); 2759 rewriter.setInsertionPointToEnd(thisBlock); 2760 genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock); 2761 rewriter.setInsertionPointToEnd(newBlock); 2762 } 2763 2764 /// Conversion of `fir.select_case` 2765 /// 2766 /// The `fir.select_case` operation is converted to a if-then-else ladder. 2767 /// Depending on the case condition type, one or several comparison and 2768 /// conditional branching can be generated. 2769 /// 2770 /// A a point value case such as `case(4)`, a lower bound case such as 2771 /// `case(5:)` or an upper bound case such as `case(:3)` are converted to a 2772 /// simple comparison between the selector value and the constant value in the 2773 /// case. The block associated with the case condition is then executed if 2774 /// the comparison succeed otherwise it branch to the next block with the 2775 /// comparison for the the next case conditon. 2776 /// 2777 /// A closed interval case condition such as `case(7:10)` is converted with a 2778 /// first comparison and conditional branching for the lower bound. If 2779 /// successful, it branch to a second block with the comparison for the 2780 /// upper bound in the same case condition. 2781 /// 2782 /// TODO: lowering of CHARACTER type cases is not handled yet. 2783 struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> { 2784 using FIROpConversion::FIROpConversion; 2785 2786 mlir::LogicalResult 2787 matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor, 2788 mlir::ConversionPatternRewriter &rewriter) const override { 2789 unsigned conds = caseOp.getNumConditions(); 2790 llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue(); 2791 // Type can be CHARACTER, INTEGER, or LOGICAL (C1145) 2792 auto ty = caseOp.getSelector().getType(); 2793 if (ty.isa<fir::CharacterType>()) { 2794 TODO(caseOp.getLoc(), "fir.select_case codegen with character type"); 2795 return mlir::failure(); 2796 } 2797 mlir::Value selector = caseOp.getSelector(adaptor.getOperands()); 2798 auto loc = caseOp.getLoc(); 2799 for (unsigned t = 0; t != conds; ++t) { 2800 mlir::Block *dest = caseOp.getSuccessor(t); 2801 llvm::Optional<mlir::ValueRange> destOps = 2802 caseOp.getSuccessorOperands(adaptor.getOperands(), t); 2803 llvm::Optional<mlir::ValueRange> cmpOps = 2804 *caseOp.getCompareOperands(adaptor.getOperands(), t); 2805 mlir::Value caseArg = *(cmpOps.getValue().begin()); 2806 mlir::Attribute attr = cases[t]; 2807 if (attr.isa<fir::PointIntervalAttr>()) { 2808 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 2809 loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg); 2810 genCaseLadderStep(loc, cmp, dest, destOps, rewriter); 2811 continue; 2812 } 2813 if (attr.isa<fir::LowerBoundAttr>()) { 2814 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 2815 loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector); 2816 genCaseLadderStep(loc, cmp, dest, destOps, rewriter); 2817 continue; 2818 } 2819 if (attr.isa<fir::UpperBoundAttr>()) { 2820 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 2821 loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg); 2822 genCaseLadderStep(loc, cmp, dest, destOps, rewriter); 2823 continue; 2824 } 2825 if (attr.isa<fir::ClosedIntervalAttr>()) { 2826 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 2827 loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector); 2828 auto *thisBlock = rewriter.getInsertionBlock(); 2829 auto *newBlock1 = createBlock(rewriter, dest); 2830 auto *newBlock2 = createBlock(rewriter, dest); 2831 rewriter.setInsertionPointToEnd(thisBlock); 2832 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2); 2833 rewriter.setInsertionPointToEnd(newBlock1); 2834 mlir::Value caseArg0 = *(cmpOps.getValue().begin() + 1); 2835 auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>( 2836 loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0); 2837 genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2); 2838 rewriter.setInsertionPointToEnd(newBlock2); 2839 continue; 2840 } 2841 assert(attr.isa<mlir::UnitAttr>()); 2842 assert((t + 1 == conds) && "unit must be last"); 2843 genBrOp(caseOp, dest, destOps, rewriter); 2844 } 2845 return mlir::success(); 2846 } 2847 }; 2848 2849 template <typename OP> 2850 static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select, 2851 typename OP::Adaptor adaptor, 2852 mlir::ConversionPatternRewriter &rewriter) { 2853 unsigned conds = select.getNumConditions(); 2854 auto cases = select.getCases().getValue(); 2855 mlir::Value selector = adaptor.getSelector(); 2856 auto loc = select.getLoc(); 2857 assert(conds > 0 && "select must have cases"); 2858 2859 llvm::SmallVector<mlir::Block *> destinations; 2860 llvm::SmallVector<mlir::ValueRange> destinationsOperands; 2861 mlir::Block *defaultDestination; 2862 mlir::ValueRange defaultOperands; 2863 llvm::SmallVector<int32_t> caseValues; 2864 2865 for (unsigned t = 0; t != conds; ++t) { 2866 mlir::Block *dest = select.getSuccessor(t); 2867 auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t); 2868 const mlir::Attribute &attr = cases[t]; 2869 if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) { 2870 destinations.push_back(dest); 2871 destinationsOperands.push_back(destOps.hasValue() ? *destOps 2872 : mlir::ValueRange{}); 2873 caseValues.push_back(intAttr.getInt()); 2874 continue; 2875 } 2876 assert(attr.template dyn_cast_or_null<mlir::UnitAttr>()); 2877 assert((t + 1 == conds) && "unit must be last"); 2878 defaultDestination = dest; 2879 defaultOperands = destOps.hasValue() ? *destOps : mlir::ValueRange{}; 2880 } 2881 2882 // LLVM::SwitchOp takes a i32 type for the selector. 2883 if (select.getSelector().getType() != rewriter.getI32Type()) 2884 selector = rewriter.create<mlir::LLVM::TruncOp>(loc, rewriter.getI32Type(), 2885 selector); 2886 2887 rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>( 2888 select, selector, 2889 /*defaultDestination=*/defaultDestination, 2890 /*defaultOperands=*/defaultOperands, 2891 /*caseValues=*/caseValues, 2892 /*caseDestinations=*/destinations, 2893 /*caseOperands=*/destinationsOperands, 2894 /*branchWeights=*/llvm::ArrayRef<std::int32_t>()); 2895 } 2896 2897 /// conversion of fir::SelectOp to an if-then-else ladder 2898 struct SelectOpConversion : public FIROpConversion<fir::SelectOp> { 2899 using FIROpConversion::FIROpConversion; 2900 2901 mlir::LogicalResult 2902 matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor, 2903 mlir::ConversionPatternRewriter &rewriter) const override { 2904 selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter); 2905 return mlir::success(); 2906 } 2907 }; 2908 2909 /// conversion of fir::SelectRankOp to an if-then-else ladder 2910 struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> { 2911 using FIROpConversion::FIROpConversion; 2912 2913 mlir::LogicalResult 2914 matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor, 2915 mlir::ConversionPatternRewriter &rewriter) const override { 2916 selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter); 2917 return mlir::success(); 2918 } 2919 }; 2920 2921 /// Lower `fir.select_type` to LLVM IR dialect. 2922 struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> { 2923 using FIROpConversion::FIROpConversion; 2924 2925 mlir::LogicalResult 2926 matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor, 2927 mlir::ConversionPatternRewriter &rewriter) const override { 2928 mlir::emitError(select.getLoc(), 2929 "fir.select_type should have already been converted"); 2930 return mlir::failure(); 2931 } 2932 }; 2933 2934 /// `fir.store` --> `llvm.store` 2935 struct StoreOpConversion : public FIROpConversion<fir::StoreOp> { 2936 using FIROpConversion::FIROpConversion; 2937 2938 mlir::LogicalResult 2939 matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor, 2940 mlir::ConversionPatternRewriter &rewriter) const override { 2941 if (store.getValue().getType().isa<fir::BoxType>()) { 2942 // fir.box value is actually in memory, load it first before storing it. 2943 mlir::Location loc = store.getLoc(); 2944 mlir::Type boxPtrTy = adaptor.getOperands()[0].getType(); 2945 auto val = rewriter.create<mlir::LLVM::LoadOp>( 2946 loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(), 2947 adaptor.getOperands()[0]); 2948 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2949 store, val, adaptor.getOperands()[1]); 2950 } else { 2951 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2952 store, adaptor.getOperands()[0], adaptor.getOperands()[1]); 2953 } 2954 return mlir::success(); 2955 } 2956 }; 2957 2958 namespace { 2959 2960 /// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for 2961 /// the character buffer and one for the buffer length. 2962 struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> { 2963 using FIROpConversion::FIROpConversion; 2964 2965 mlir::LogicalResult 2966 matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor, 2967 mlir::ConversionPatternRewriter &rewriter) const override { 2968 auto *ctx = unboxchar.getContext(); 2969 2970 mlir::Type lenTy = convertType(unboxchar.getType(1)); 2971 mlir::Value tuple = adaptor.getOperands()[0]; 2972 mlir::Type tupleTy = tuple.getType(); 2973 2974 mlir::Location loc = unboxchar.getLoc(); 2975 mlir::Value ptrToBuffer = 2976 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 0); 2977 2978 mlir::LLVM::ExtractValueOp len = 2979 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 1); 2980 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len); 2981 2982 rewriter.replaceOp(unboxchar, 2983 llvm::ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast}); 2984 return mlir::success(); 2985 } 2986 }; 2987 2988 /// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its 2989 /// components. 2990 /// TODO: Part of supporting Fortran 2003 procedure pointers. 2991 struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> { 2992 using FIROpConversion::FIROpConversion; 2993 2994 mlir::LogicalResult 2995 matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor, 2996 mlir::ConversionPatternRewriter &rewriter) const override { 2997 TODO(unboxproc.getLoc(), "fir.unboxproc codegen"); 2998 return mlir::failure(); 2999 } 3000 }; 3001 3002 /// convert to LLVM IR dialect `undef` 3003 struct UndefOpConversion : public FIROpConversion<fir::UndefOp> { 3004 using FIROpConversion::FIROpConversion; 3005 3006 mlir::LogicalResult 3007 matchAndRewrite(fir::UndefOp undef, OpAdaptor, 3008 mlir::ConversionPatternRewriter &rewriter) const override { 3009 rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>( 3010 undef, convertType(undef.getType())); 3011 return mlir::success(); 3012 } 3013 }; 3014 3015 struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> { 3016 using FIROpConversion::FIROpConversion; 3017 3018 mlir::LogicalResult 3019 matchAndRewrite(fir::ZeroOp zero, OpAdaptor, 3020 mlir::ConversionPatternRewriter &rewriter) const override { 3021 mlir::Type ty = convertType(zero.getType()); 3022 if (ty.isa<mlir::LLVM::LLVMPointerType>()) { 3023 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty); 3024 } else if (ty.isa<mlir::IntegerType>()) { 3025 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 3026 zero, ty, mlir::IntegerAttr::get(zero.getType(), 0)); 3027 } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) { 3028 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 3029 zero, ty, mlir::FloatAttr::get(zero.getType(), 0.0)); 3030 } else { 3031 // TODO: create ConstantAggregateZero for FIR aggregate/array types. 3032 return rewriter.notifyMatchFailure( 3033 zero, 3034 "conversion of fir.zero with aggregate type not implemented yet"); 3035 } 3036 return mlir::success(); 3037 } 3038 }; 3039 3040 /// `fir.unreachable` --> `llvm.unreachable` 3041 struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> { 3042 using FIROpConversion::FIROpConversion; 3043 3044 mlir::LogicalResult 3045 matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor, 3046 mlir::ConversionPatternRewriter &rewriter) const override { 3047 rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach); 3048 return mlir::success(); 3049 } 3050 }; 3051 3052 /// `fir.is_present` --> 3053 /// ``` 3054 /// %0 = llvm.mlir.constant(0 : i64) 3055 /// %1 = llvm.ptrtoint %0 3056 /// %2 = llvm.icmp "ne" %1, %0 : i64 3057 /// ``` 3058 struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> { 3059 using FIROpConversion::FIROpConversion; 3060 3061 mlir::LogicalResult 3062 matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor, 3063 mlir::ConversionPatternRewriter &rewriter) const override { 3064 mlir::Type idxTy = lowerTy().indexType(); 3065 mlir::Location loc = isPresent.getLoc(); 3066 auto ptr = adaptor.getOperands()[0]; 3067 3068 if (isPresent.getVal().getType().isa<fir::BoxCharType>()) { 3069 auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>(); 3070 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 3071 3072 mlir::Type ty = structTy.getBody()[0]; 3073 mlir::MLIRContext *ctx = isPresent.getContext(); 3074 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3075 ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0); 3076 } 3077 mlir::LLVM::ConstantOp c0 = 3078 genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0); 3079 auto addr = rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr); 3080 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 3081 isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0); 3082 3083 return mlir::success(); 3084 } 3085 }; 3086 3087 /// Create value signaling an absent optional argument in a call, e.g. 3088 /// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>` 3089 struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> { 3090 using FIROpConversion::FIROpConversion; 3091 3092 mlir::LogicalResult 3093 matchAndRewrite(fir::AbsentOp absent, OpAdaptor, 3094 mlir::ConversionPatternRewriter &rewriter) const override { 3095 mlir::Type ty = convertType(absent.getType()); 3096 mlir::Location loc = absent.getLoc(); 3097 3098 if (absent.getType().isa<fir::BoxCharType>()) { 3099 auto structTy = ty.cast<mlir::LLVM::LLVMStructType>(); 3100 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 3101 auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3102 auto nullField = 3103 rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]); 3104 mlir::MLIRContext *ctx = absent.getContext(); 3105 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3106 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 3107 absent, ty, undefStruct, nullField, c0); 3108 } else { 3109 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty); 3110 } 3111 return mlir::success(); 3112 } 3113 }; 3114 3115 // 3116 // Primitive operations on Complex types 3117 // 3118 3119 /// Generate inline code for complex addition/subtraction 3120 template <typename LLVMOP, typename OPTY> 3121 static mlir::LLVM::InsertValueOp 3122 complexSum(OPTY sumop, mlir::ValueRange opnds, 3123 mlir::ConversionPatternRewriter &rewriter, 3124 fir::LLVMTypeConverter &lowering) { 3125 mlir::Value a = opnds[0]; 3126 mlir::Value b = opnds[1]; 3127 auto loc = sumop.getLoc(); 3128 auto ctx = sumop.getContext(); 3129 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3130 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3131 mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType())); 3132 mlir::Type ty = lowering.convertType(sumop.getType()); 3133 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3134 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3135 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3136 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3137 auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1); 3138 auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1); 3139 auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3140 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0); 3141 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1); 3142 } 3143 } // namespace 3144 3145 namespace { 3146 struct AddcOpConversion : public FIROpConversion<fir::AddcOp> { 3147 using FIROpConversion::FIROpConversion; 3148 3149 mlir::LogicalResult 3150 matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor, 3151 mlir::ConversionPatternRewriter &rewriter) const override { 3152 // given: (x + iy) + (x' + iy') 3153 // result: (x + x') + i(y + y') 3154 auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(), 3155 rewriter, lowerTy()); 3156 rewriter.replaceOp(addc, r.getResult()); 3157 return mlir::success(); 3158 } 3159 }; 3160 3161 struct SubcOpConversion : public FIROpConversion<fir::SubcOp> { 3162 using FIROpConversion::FIROpConversion; 3163 3164 mlir::LogicalResult 3165 matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor, 3166 mlir::ConversionPatternRewriter &rewriter) const override { 3167 // given: (x + iy) - (x' + iy') 3168 // result: (x - x') + i(y - y') 3169 auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(), 3170 rewriter, lowerTy()); 3171 rewriter.replaceOp(subc, r.getResult()); 3172 return mlir::success(); 3173 } 3174 }; 3175 3176 /// Inlined complex multiply 3177 struct MulcOpConversion : public FIROpConversion<fir::MulcOp> { 3178 using FIROpConversion::FIROpConversion; 3179 3180 mlir::LogicalResult 3181 matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor, 3182 mlir::ConversionPatternRewriter &rewriter) const override { 3183 // TODO: Can we use a call to __muldc3 ? 3184 // given: (x + iy) * (x' + iy') 3185 // result: (xx'-yy')+i(xy'+yx') 3186 mlir::Value a = adaptor.getOperands()[0]; 3187 mlir::Value b = adaptor.getOperands()[1]; 3188 auto loc = mulc.getLoc(); 3189 auto *ctx = mulc.getContext(); 3190 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3191 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3192 mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType())); 3193 mlir::Type ty = convertType(mulc.getType()); 3194 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3195 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3196 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3197 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3198 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 3199 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 3200 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 3201 auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx); 3202 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 3203 auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy); 3204 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3205 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 3206 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 3207 rewriter.replaceOp(mulc, r0.getResult()); 3208 return mlir::success(); 3209 } 3210 }; 3211 3212 /// Inlined complex division 3213 struct DivcOpConversion : public FIROpConversion<fir::DivcOp> { 3214 using FIROpConversion::FIROpConversion; 3215 3216 mlir::LogicalResult 3217 matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor, 3218 mlir::ConversionPatternRewriter &rewriter) const override { 3219 // TODO: Can we use a call to __divdc3 instead? 3220 // Just generate inline code for now. 3221 // given: (x + iy) / (x' + iy') 3222 // result: ((xx'+yy')/d) + i((yx'-xy')/d) where d = x'x' + y'y' 3223 mlir::Value a = adaptor.getOperands()[0]; 3224 mlir::Value b = adaptor.getOperands()[1]; 3225 auto loc = divc.getLoc(); 3226 auto *ctx = divc.getContext(); 3227 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3228 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3229 mlir::Type eleTy = convertType(getComplexEleTy(divc.getType())); 3230 mlir::Type ty = convertType(divc.getType()); 3231 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3232 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3233 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3234 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3235 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 3236 auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1); 3237 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 3238 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 3239 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 3240 auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1); 3241 auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1); 3242 auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy); 3243 auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy); 3244 auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d); 3245 auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d); 3246 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3247 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 3248 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 3249 rewriter.replaceOp(divc, r0.getResult()); 3250 return mlir::success(); 3251 } 3252 }; 3253 3254 /// Inlined complex negation 3255 struct NegcOpConversion : public FIROpConversion<fir::NegcOp> { 3256 using FIROpConversion::FIROpConversion; 3257 3258 mlir::LogicalResult 3259 matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor, 3260 mlir::ConversionPatternRewriter &rewriter) const override { 3261 // given: -(x + iy) 3262 // result: -x - iy 3263 auto *ctxt = neg.getContext(); 3264 auto eleTy = convertType(getComplexEleTy(neg.getType())); 3265 auto ty = convertType(neg.getType()); 3266 auto loc = neg.getLoc(); 3267 mlir::Value o0 = adaptor.getOperands()[0]; 3268 auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0)); 3269 auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1)); 3270 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0); 3271 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1); 3272 auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp); 3273 auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip); 3274 auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0); 3275 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1); 3276 return mlir::success(); 3277 } 3278 }; 3279 3280 /// Conversion pattern for operation that must be dead. The information in these 3281 /// operations is used by other operation. At this point they should not have 3282 /// anymore uses. 3283 /// These operations are normally dead after the pre-codegen pass. 3284 template <typename FromOp> 3285 struct MustBeDeadConversion : public FIROpConversion<FromOp> { 3286 explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering, 3287 const fir::FIRToLLVMPassOptions &options) 3288 : FIROpConversion<FromOp>(lowering, options) {} 3289 using OpAdaptor = typename FromOp::Adaptor; 3290 3291 mlir::LogicalResult 3292 matchAndRewrite(FromOp op, OpAdaptor adaptor, 3293 mlir::ConversionPatternRewriter &rewriter) const final { 3294 if (!op->getUses().empty()) 3295 return rewriter.notifyMatchFailure(op, "op must be dead"); 3296 rewriter.eraseOp(op); 3297 return mlir::success(); 3298 } 3299 }; 3300 3301 struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> { 3302 using MustBeDeadConversion::MustBeDeadConversion; 3303 }; 3304 3305 struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> { 3306 using MustBeDeadConversion::MustBeDeadConversion; 3307 }; 3308 3309 struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> { 3310 using MustBeDeadConversion::MustBeDeadConversion; 3311 }; 3312 3313 struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> { 3314 using MustBeDeadConversion::MustBeDeadConversion; 3315 }; 3316 3317 } // namespace 3318 3319 namespace { 3320 /// Convert FIR dialect to LLVM dialect 3321 /// 3322 /// This pass lowers all FIR dialect operations to LLVM IR dialect. An 3323 /// MLIR pass is used to lower residual Std dialect to LLVM IR dialect. 3324 /// 3325 /// This pass is not complete yet. We are upstreaming it in small patches. 3326 class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> { 3327 public: 3328 FIRToLLVMLowering() = default; 3329 FIRToLLVMLowering(fir::FIRToLLVMPassOptions options) : options{options} {} 3330 mlir::ModuleOp getModule() { return getOperation(); } 3331 3332 void runOnOperation() override final { 3333 auto mod = getModule(); 3334 if (!forcedTargetTriple.empty()) 3335 fir::setTargetTriple(mod, forcedTargetTriple); 3336 3337 auto *context = getModule().getContext(); 3338 fir::LLVMTypeConverter typeConverter{getModule()}; 3339 mlir::RewritePatternSet pattern(context); 3340 pattern.insert< 3341 AbsentOpConversion, AddcOpConversion, AddrOfOpConversion, 3342 AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion, 3343 BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion, 3344 BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion, 3345 BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeDescOpConversion, 3346 CallOpConversion, CmpcOpConversion, ConstcOpConversion, 3347 ConvertOpConversion, CoordinateOpConversion, DispatchOpConversion, 3348 DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion, 3349 EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion, 3350 ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion, 3351 FreeMemOpConversion, GenTypeDescOpConversion, GlobalLenOpConversion, 3352 GlobalOpConversion, HasValueOpConversion, InsertOnRangeOpConversion, 3353 InsertValueOpConversion, IsPresentOpConversion, 3354 LenParamIndexOpConversion, LoadOpConversion, MulcOpConversion, 3355 NegcOpConversion, NoReassocOpConversion, SelectCaseOpConversion, 3356 SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion, 3357 ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion, 3358 SliceOpConversion, StoreOpConversion, StringLitOpConversion, 3359 SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion, 3360 UndefOpConversion, UnreachableOpConversion, XArrayCoorOpConversion, 3361 XEmboxOpConversion, XReboxOpConversion, ZeroOpConversion>(typeConverter, 3362 options); 3363 mlir::populateFuncToLLVMConversionPatterns(typeConverter, pattern); 3364 mlir::populateOpenMPToLLVMConversionPatterns(typeConverter, pattern); 3365 mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter, 3366 pattern); 3367 mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter, 3368 pattern); 3369 mlir::ConversionTarget target{*context}; 3370 target.addLegalDialect<mlir::LLVM::LLVMDialect>(); 3371 // The OpenMP dialect is legal for Operations without regions, for those 3372 // which contains regions it is legal if the region contains only the 3373 // LLVM dialect. Add OpenMP dialect as a legal dialect for conversion and 3374 // legalize conversion of OpenMP operations without regions. 3375 mlir::configureOpenMPToLLVMConversionLegality(target, typeConverter); 3376 target.addLegalDialect<mlir::omp::OpenMPDialect>(); 3377 3378 // required NOPs for applying a full conversion 3379 target.addLegalOp<mlir::ModuleOp>(); 3380 3381 // apply the patterns 3382 if (mlir::failed(mlir::applyFullConversion(getModule(), target, 3383 std::move(pattern)))) { 3384 signalPassFailure(); 3385 } 3386 } 3387 3388 private: 3389 fir::FIRToLLVMPassOptions options; 3390 }; 3391 3392 /// Lower from LLVM IR dialect to proper LLVM-IR and dump the module 3393 struct LLVMIRLoweringPass 3394 : public mlir::PassWrapper<LLVMIRLoweringPass, 3395 mlir::OperationPass<mlir::ModuleOp>> { 3396 MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(LLVMIRLoweringPass) 3397 3398 LLVMIRLoweringPass(llvm::raw_ostream &output, fir::LLVMIRLoweringPrinter p) 3399 : output{output}, printer{p} {} 3400 3401 mlir::ModuleOp getModule() { return getOperation(); } 3402 3403 void runOnOperation() override final { 3404 auto *ctx = getModule().getContext(); 3405 auto optName = getModule().getName(); 3406 llvm::LLVMContext llvmCtx; 3407 if (auto llvmModule = mlir::translateModuleToLLVMIR( 3408 getModule(), llvmCtx, optName ? *optName : "FIRModule")) { 3409 printer(*llvmModule, output); 3410 return; 3411 } 3412 3413 mlir::emitError(mlir::UnknownLoc::get(ctx), "could not emit LLVM-IR\n"); 3414 signalPassFailure(); 3415 } 3416 3417 private: 3418 llvm::raw_ostream &output; 3419 fir::LLVMIRLoweringPrinter printer; 3420 }; 3421 3422 } // namespace 3423 3424 std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() { 3425 return std::make_unique<FIRToLLVMLowering>(); 3426 } 3427 3428 std::unique_ptr<mlir::Pass> 3429 fir::createFIRToLLVMPass(fir::FIRToLLVMPassOptions options) { 3430 return std::make_unique<FIRToLLVMLowering>(options); 3431 } 3432 3433 std::unique_ptr<mlir::Pass> 3434 fir::createLLVMDialectToLLVMPass(llvm::raw_ostream &output, 3435 fir::LLVMIRLoweringPrinter printer) { 3436 return std::make_unique<LLVMIRLoweringPass>(output, printer); 3437 } 3438