//===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
//
//===----------------------------------------------------------------------===//

#include "flang/Optimizer/CodeGen/CodeGen.h"
#include "CGOps.h"
#include "PassDetail.h"
#include "flang/ISO_Fortran_binding.h"
#include "flang/Optimizer/Dialect/FIRAttr.h"
#include "flang/Optimizer/Dialect/FIROps.h"
#include "flang/Optimizer/Support/InternalNames.h"
#include "flang/Optimizer/Support/TypeCode.h"
#include "flang/Semantics/runtime-type-info.h"
#include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h"
#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h"
#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/OpenMPToLLVM/ConvertOpenMPToLLVM.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Target/LLVMIR/ModuleTranslation.h"
#include "llvm/ADT/ArrayRef.h"

#define DEBUG_TYPE "flang-codegen"

// fir::LLVMTypeConverter for converting to LLVM IR dialect types.
#include "TypeConverter.h"

// TODO: This should really be recovered from the specified target.
static constexpr unsigned defaultAlign = 8;

/// `fir.box` attribute values as defined for CFI_attribute_t in
/// flang/ISO_Fortran_binding.h.
static constexpr unsigned kAttrPointer = CFI_attribute_pointer;
static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable;

static inline mlir::Type getVoidPtrType(mlir::MLIRContext *context) {
  return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8));
}

static mlir::LLVM::ConstantOp
genConstantIndex(mlir::Location loc, mlir::Type ity,
                 mlir::ConversionPatternRewriter &rewriter,
                 std::int64_t offset) {
  auto cattr = rewriter.getI64IntegerAttr(offset);
  return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
}

static mlir::Block *createBlock(mlir::ConversionPatternRewriter &rewriter,
                                mlir::Block *insertBefore) {
  assert(insertBefore && "expected valid insertion block");
  return rewriter.createBlock(insertBefore->getParent(),
                              mlir::Region::iterator(insertBefore));
}

namespace {
/// FIR conversion pattern template
template <typename FromOp>
class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
public:
  explicit FIROpConversion(fir::LLVMTypeConverter &lowering,
                           const fir::FIRToLLVMPassOptions &options)
      : mlir::ConvertOpToLLVMPattern<FromOp>(lowering), options(options) {}

protected:
  mlir::Type convertType(mlir::Type ty) const {
    return lowerTy().convertType(ty);
  }
  mlir::Type voidPtrTy() const { return getVoidPtrType(); }

  mlir::Type getVoidPtrType() const {
    return mlir::LLVM::LLVMPointerType::get(
        mlir::IntegerType::get(&lowerTy().getContext(), 8));
  }

  mlir::LLVM::ConstantOp
  genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
                 int value) const {
    mlir::Type i32Ty = rewriter.getI32Type();
    mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr);
  }

  mlir::LLVM::ConstantOp
  genConstantOffset(mlir::Location loc,
                    mlir::ConversionPatternRewriter &rewriter,
                    int offset) const {
    mlir::Type ity = lowerTy().offsetType();
    mlir::IntegerAttr cattr = rewriter.getI32IntegerAttr(offset);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
  }

  /// Perform an extension or truncation as needed on an integer value.
  /// Lowering to the specific target may involve some sign-extending or
  /// truncation of values, particularly to fit them from abstract box types to
  /// the appropriate reified structures.
  mlir::Value integerCast(mlir::Location loc,
                          mlir::ConversionPatternRewriter &rewriter,
                          mlir::Type ty, mlir::Value val) const {
    auto valTy = val.getType();
    // If the value was not yet lowered, lower its type so that it can
    // be used in getPrimitiveTypeSizeInBits.
    if (!valTy.isa<mlir::IntegerType>())
      valTy = convertType(valTy);
    auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
    auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy);
    if (toSize < fromSize)
      return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
    if (toSize > fromSize)
      return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
    return val;
  }

  /// Construct code sequence to extract the specific value from a `fir.box`.
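  /// \p boxValue is the position of the field to read within the lowered
  /// descriptor struct.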
  mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box,
                              mlir::Type resultTy,
                              mlir::ConversionPatternRewriter &rewriter,
                              unsigned boxValue) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cValuePos =
        genConstantOffset(loc, rewriter, boxValue);
    auto pty = mlir::LLVM::LLVMPointerType::get(resultTy);
    auto p = rewriter.create<mlir::LLVM::GEPOp>(
        loc, pty, box, mlir::ValueRange{c0, cValuePos});
    return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p);
  }

  /// Method to construct code sequence to get the triple for dimension `dim`
  /// from a box.
  llvm::SmallVector<mlir::Value, 3>
  getDimsFromBox(mlir::Location loc, llvm::ArrayRef<mlir::Type> retTys,
                 mlir::Value box, mlir::Value dim,
                 mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cDims =
        genConstantOffset(loc, rewriter, kDimsPosInBox);
    mlir::LLVM::LoadOp l0 =
        loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter);
    mlir::LLVM::LoadOp l1 =
        loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter);
    mlir::LLVM::LoadOp l2 =
        loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter);
    return {l0.getResult(), l1.getResult(), l2.getResult()};
  }

  mlir::LLVM::LoadOp
  loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0,
                 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off,
                 mlir::Type ty,
                 mlir::ConversionPatternRewriter &rewriter) const {
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  mlir::Value
  loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim,
                    mlir::ConversionPatternRewriter &rewriter) const {
    auto idxTy = lowerTy().indexType();
    auto c0 = genConstantOffset(loc, rewriter, 0);
    auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox);
    auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim);
    return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy,
                          rewriter);
  }

  /// Read base address from a fir.box. Returned address has type ty.
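  /// The address is loaded from the kAddrPosInBox field of the descriptor.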
  mlir::Value
  loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                      mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cAddr =
        genConstantOffset(loc, rewriter, kAddrPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  mlir::Value
  loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                         mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cElemLen =
        genConstantOffset(loc, rewriter, kElemLenPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  // Get the element type given an LLVM type that is of the form
  // [llvm.ptr](array|struct|vector)+ and the provided indexes.
  static mlir::Type getBoxEleTy(mlir::Type type,
                                llvm::ArrayRef<unsigned> indexes) {
    if (auto t = type.dyn_cast<mlir::LLVM::LLVMPointerType>())
      type = t.getElementType();
    for (auto i : indexes) {
      if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) {
        assert(!t.isOpaque() && i < t.getBody().size());
        type = t.getBody()[i];
      } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
        type = t.getElementType();
      } else if (auto t = type.dyn_cast<mlir::VectorType>()) {
        type = t.getElementType();
      } else {
        fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()),
                            "request for invalid box element type");
      }
    }
    return type;
  }

  // Return LLVM type of the base address given the LLVM type
  // of the related descriptor (lowered fir.box type).
  static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) {
    return getBoxEleTy(type, {kAddrPosInBox});
  }

  // Load the attribute from the \p box and perform a check against
  // \p maskValue. The final comparison is implemented as
  // `(attribute & maskValue) != 0`.
  mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box,
                                   mlir::ConversionPatternRewriter &rewriter,
                                   unsigned maskValue) const {
    mlir::Type attrTy = rewriter.getI32Type();
    mlir::Value attribute =
        getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox);
    mlir::LLVM::ConstantOp attrMask =
        genConstantOffset(loc, rewriter, maskValue);
    auto maskRes =
        rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask);
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    return rewriter.create<mlir::LLVM::ICmpOp>(
        loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0);
  }

  template <typename... ARGS>
  mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty,
                           mlir::ConversionPatternRewriter &rewriter,
                           mlir::Value base, ARGS... args) const {
    llvm::SmallVector<mlir::Value> cv{args...};
    return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv);
  }

  fir::LLVMTypeConverter &lowerTy() const {
    return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter());
  }

  const fir::FIRToLLVMPassOptions &options;
};

/// FIR conversion pattern template
template <typename FromOp>
class FIROpAndTypeConversion : public FIROpConversion<FromOp> {
public:
  using FIROpConversion<FromOp>::FIROpConversion;
  using OpAdaptor = typename FromOp::Adaptor;

  mlir::LogicalResult
  matchAndRewrite(FromOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const final {
    mlir::Type ty = this->convertType(op.getType());
    return doRewrite(op, ty, adaptor, rewriter);
  }

  virtual mlir::LogicalResult
  doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const = 0;
};

// Lower `fir.address_of` operation to `llvm.address_of` operation.
struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(addr.getType());
    rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>(
        addr, ty, addr.getSymbol().getRootReference().getValue());
    return mlir::success();
  }
};
} // namespace

/// Lookup the function to compute the memory size of this parametric derived
/// type. The size of the object may depend on the LEN type parameters of the
/// derived type.
static mlir::LLVM::LLVMFuncOp
getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op,
                          mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  std::string name = recTy.getName().str() + "P.mem.size";
  return module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name);
}

// Compute the alloc scale size (constant factors encoded in the array type).
// We do this for arrays without a constant interior and for arrays of
// characters with dynamic length, since those are the only ones that get
// decayed to a pointer to the element type.
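// A null mlir::Value is returned when no scaling is needed.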
template <typename OP>
static mlir::Value
genAllocationScaleSize(OP op, mlir::Type ity,
                       mlir::ConversionPatternRewriter &rewriter) {
  mlir::Location loc = op.getLoc();
  mlir::Type dataTy = op.getInType();
  mlir::Type scalarType = fir::unwrapSequenceType(dataTy);
  auto seqTy = dataTy.dyn_cast<fir::SequenceType>();
  if ((op.hasShapeOperands() && seqTy && !seqTy.hasConstantInterior()) ||
      (seqTy && fir::characterWithDynamicLen(scalarType))) {
    fir::SequenceType::Extent constSize = 1;
    for (auto extent : seqTy.getShape())
      if (extent != fir::SequenceType::getUnknownExtent())
        constSize *= extent;
    if (constSize != 1) {
      mlir::Value constVal{
          genConstantIndex(loc, ity, rewriter, constSize).getResult()};
      return constVal;
    }
  }
  return nullptr;
}

namespace {
/// convert to LLVM IR dialect `alloca`
struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    auto loc = alloc.getLoc();
    mlir::Type ity = lowerTy().indexType();
    unsigned i = 0;
    mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult();
    mlir::Type ty = convertType(alloc.getType());
    mlir::Type resultTy = ty;
    if (alloc.hasLenParams()) {
      unsigned end = alloc.numLenParams();
      llvm::SmallVector<mlir::Value> lenParams;
      for (; i < end; ++i)
        lenParams.push_back(operands[i]);
      mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType());
      if (auto chrTy = scalarType.dyn_cast<fir::CharacterType>()) {
        fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen(
            chrTy.getContext(), chrTy.getFKind());
        ty = mlir::LLVM::LLVMPointerType::get(convertType(rawCharTy));
        assert(end == 1);
        size = integerCast(loc, rewriter, ity, lenParams[0]);
      } else if (auto recTy = scalarType.dyn_cast<fir::RecordType>()) {
        mlir::LLVM::LLVMFuncOp memSizeFn =
            getDependentTypeMemSizeFn(recTy, alloc, rewriter);
        if (!memSizeFn)
          emitError(loc, "did not find allocation function");
        mlir::NamedAttribute attr = rewriter.getNamedAttr(
            "callee", mlir::SymbolRefAttr::get(memSizeFn));
        auto call = rewriter.create<mlir::LLVM::CallOp>(
            loc, ity, lenParams, llvm::ArrayRef<mlir::NamedAttribute>{attr});
        size = call.getResult(0);
        ty = mlir::LLVM::LLVMPointerType::get(
            mlir::IntegerType::get(alloc.getContext(), 8));
      } else {
        return emitError(loc, "unexpected type ")
               << scalarType << " with type parameters";
      }
    }
    if (auto scaleSize = genAllocationScaleSize(alloc, ity, rewriter))
      size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
    if (alloc.hasShapeOperands()) {
      unsigned end = operands.size();
      for (; i < end; ++i)
        size = rewriter.create<mlir::LLVM::MulOp>(
            loc, ity, size, integerCast(loc, rewriter, ity, operands[i]));
    }
    if (ty == resultTy) {
      // Do not emit the bitcast if ty and resultTy are the same.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AllocaOp>(alloc, ty, size,
                                                        alloc->getAttrs());
    } else {
      auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, ty, size,
                                                      alloc->getAttrs());
      rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(alloc, resultTy, al);
    }
    return mlir::success();
  }
};
} // namespace

/// Construct an `llvm.extractvalue` instruction. It will return the value at
/// element \p x from \p tuple.
static mlir::LLVM::ExtractValueOp
genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty,
                         mlir::ConversionPatternRewriter &rewriter,
                         mlir::MLIRContext *ctx, int x) {
  auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x));
  auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x];
  return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx);
}

namespace {
/// Lower `fir.box_addr` to the sequence of operations to extract the first
/// element of the box.
struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxaddr.getLoc();
    mlir::Type ty = convertType(boxaddr.getType());
    if (auto argty = boxaddr.getVal().getType().dyn_cast<fir::BoxType>()) {
      rewriter.replaceOp(boxaddr, loadBaseAddrFromBox(loc, ty, a, rewriter));
    } else {
      auto c0attr = rewriter.getI32IntegerAttr(0);
      auto c0 = mlir::ArrayAttr::get(boxaddr.getContext(), c0attr);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, ty, a,
                                                              c0);
    }
    return mlir::success();
  }
};

/// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the
/// boxchar.
struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value boxChar = adaptor.getOperands()[0];
    mlir::Location loc = boxChar.getLoc();
    mlir::MLIRContext *ctx = boxChar.getContext();
    mlir::Type returnValTy = boxCharLen.getResult().getType();

    constexpr int boxcharLenIdx = 1;
    mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex(
        loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx);
    mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len);
    rewriter.replaceOp(boxCharLen, lenAfterCast);

    return mlir::success();
  }
};

/// Lower `fir.box_dims` to a sequence of operations to extract the requested
/// dimension information from the boxed value.
/// Results in a triple set of GEPs and loads.
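/// The three results are the lower bound, extent, and stride of the requested
/// dimension.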
struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    llvm::SmallVector<mlir::Type, 3> resultTypes = {
        convertType(boxdims.getResult(0).getType()),
        convertType(boxdims.getResult(1).getType()),
        convertType(boxdims.getResult(2).getType()),
    };
    auto results =
        getDimsFromBox(boxdims.getLoc(), resultTypes, adaptor.getOperands()[0],
                       adaptor.getOperands()[1], rewriter);
    rewriter.replaceOp(boxdims, results);
    return mlir::success();
  }
};

/// Lower `fir.box_elesize` to a sequence of operations to extract the size of
/// an element in the boxed value.
struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxelesz.getLoc();
    auto ty = convertType(boxelesz.getType());
    auto elemSize = getValueFromBox(loc, a, ty, rewriter, kElemLenPosInBox);
    rewriter.replaceOp(boxelesz, elemSize);
    return mlir::success();
  }
};

/// Lower `fir.box_isalloc` to a sequence of operations to determine if the
/// boxed value was from an ALLOCATABLE entity.
struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisalloc.getLoc();
    mlir::Value check =
        genBoxAttributeCheck(loc, box, rewriter, kAttrAllocatable);
    rewriter.replaceOp(boxisalloc, check);
    return mlir::success();
  }
};

/// Lower `fir.box_isarray` to a sequence of operations to determine if the
/// boxed value is an array.
struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxisarray.getLoc();
    auto rank =
        getValueFromBox(loc, a, rewriter.getI32Type(), rewriter, kRankPosInBox);
    auto c0 = genConstantOffset(loc, rewriter, 0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
        boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0);
    return mlir::success();
  }
};

/// Lower `fir.box_isptr` to a sequence of operations to determine if the
/// boxed value was from a POINTER entity.
struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisptr.getLoc();
    mlir::Value check = genBoxAttributeCheck(loc, box, rewriter, kAttrPointer);
    rewriter.replaceOp(boxisptr, check);
    return mlir::success();
  }
};

/// Lower `fir.box_rank` to the sequence of operations to extract the rank from
/// the box.
struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxrank.getLoc();
    mlir::Type ty = convertType(boxrank.getType());
    auto result = getValueFromBox(loc, a, ty, rewriter, kRankPosInBox);
    rewriter.replaceOp(boxrank, result);
    return mlir::success();
  }
};

/// Lower `fir.boxproc_host` operation. Extracts the host pointer from the
/// boxproc.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(boxprochost.getLoc(), "fir.boxproc_host codegen");
    return mlir::failure();
  }
};

/// Lower `fir.box_tdesc` to the sequence of operations to extract the type
/// descriptor from the box.
struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxtypedesc.getLoc();
    mlir::Type typeTy =
        fir::getDescFieldTypeModel<kTypePosInBox>()(boxtypedesc.getContext());
    auto result = getValueFromBox(loc, box, typeTy, rewriter, kTypePosInBox);
    auto typePtrTy = mlir::LLVM::LLVMPointerType::get(typeTy);
    rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(boxtypedesc, typePtrTy,
                                                        result);
    return mlir::success();
  }
};

/// Lower `fir.string_lit` to LLVM IR dialect operation.
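/// A StringAttr or DenseElementsAttr is lowered to a single llvm.mlir.constant;
/// an ArrayAttr is expanded one character at a time with llvm.insertvalue.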
struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(constop.getType());
    auto attr = constop.getValue();
    if (attr.isa<mlir::StringAttr>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr);
      return mlir::success();
    }

    auto charTy = constop.getType().cast<fir::CharacterType>();
    unsigned bits = lowerTy().characterBitsize(charTy);
    mlir::Type intTy = rewriter.getIntegerType(bits);
    mlir::Location loc = constop.getLoc();
    mlir::Value cst = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    if (auto arr = attr.dyn_cast<mlir::DenseElementsAttr>()) {
      cst = rewriter.create<mlir::LLVM::ConstantOp>(loc, ty, arr);
    } else if (auto arr = attr.dyn_cast<mlir::ArrayAttr>()) {
      for (auto a : llvm::enumerate(arr.getValue())) {
        // convert each character to a precise bitsize
        auto elemAttr = mlir::IntegerAttr::get(
            intTy,
            a.value().cast<mlir::IntegerAttr>().getValue().zextOrTrunc(bits));
        auto elemCst =
            rewriter.create<mlir::LLVM::ConstantOp>(loc, intTy, elemAttr);
        auto index = mlir::ArrayAttr::get(
            constop.getContext(), rewriter.getI32IntegerAttr(a.index()));
        cst = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, cst, elemCst,
                                                         index);
      }
    } else {
      return mlir::failure();
    }
    rewriter.replaceOp(constop, cst);
    return mlir::success();
  }
};

// `fir.call` -> `llvm.call`
struct CallOpConversion : public FIROpConversion<fir::CallOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CallOp call, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    llvm::SmallVector<mlir::Type> resultTys;
    for (auto r : call.getResults())
      resultTys.push_back(convertType(r.getType()));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        call, resultTys, adaptor.getOperands(), call->getAttrs());
    return mlir::success();
  }
};
} // namespace

static mlir::Type getComplexEleTy(mlir::Type complex) {
  if (auto cc = complex.dyn_cast<mlir::ComplexType>())
    return cc.getElementType();
  return complex.cast<fir::ComplexType>().getElementType();
}

namespace {
/// Compare complex values
///
/// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une).
///
/// For completeness, all other comparisons are done on the real component only.
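/// .EQ. holds when both the real and imaginary comparisons hold (and), while
/// .NE. holds when either of them holds (or).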
struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    mlir::MLIRContext *ctxt = cmp.getContext();
    mlir::Type eleTy = convertType(getComplexEleTy(cmp.getLhs().getType()));
    mlir::Type resTy = convertType(cmp.getType());
    mlir::Location loc = cmp.getLoc();
    auto pos0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
    llvm::SmallVector<mlir::Value, 2> rp{
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0],
                                                    pos0),
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1],
                                                    pos0)};
    auto rcp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, rp, cmp->getAttrs());
    auto pos1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
    llvm::SmallVector<mlir::Value, 2> ip{
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0],
                                                    pos1),
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1],
                                                    pos1)};
    auto icp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, ip, cmp->getAttrs());
    llvm::SmallVector<mlir::Value, 2> cp{rcp, icp};
    switch (cmp.getPredicate()) {
    case mlir::arith::CmpFPredicate::OEQ: // .EQ.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp);
      break;
    case mlir::arith::CmpFPredicate::UNE: // .NE.
      rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp);
      break;
    default:
      rewriter.replaceOp(cmp, rcp.getResult());
      break;
    }
    return mlir::success();
  }
};

/// Lower complex constants
struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::ConstcOp conc, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Location loc = conc.getLoc();
    mlir::MLIRContext *ctx = conc.getContext();
    mlir::Type ty = convertType(conc.getType());
    mlir::Type ety = convertType(getComplexEleTy(conc.getType()));
    auto realFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getReal()));
    auto realPart =
        rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, realFloatAttr);
    auto imFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getImaginary()));
    auto imPart =
        rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, imFloatAttr);
    auto realIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto imIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    auto undef = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto setReal = rewriter.create<mlir::LLVM::InsertValueOp>(
        loc, ty, undef, realPart, realIndex);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(conc, ty, setReal,
                                                           imPart, imIndex);
    return mlir::success();
  }

  inline llvm::APFloat getValue(mlir::Attribute attr) const {
    return attr.cast<fir::RealAttr>().getValue();
  }
};

/// convert value of from-type to value of to-type
struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> {
  using FIROpConversion::FIROpConversion;

  static bool isFloatingPointTy(mlir::Type ty) {
    return ty.isa<mlir::FloatType>();
  }

  mlir::LogicalResult
  matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto fromFirTy = convert.getValue().getType();
    auto toFirTy = convert.getRes().getType();
    auto fromTy = convertType(fromFirTy);
    auto toTy = convertType(toFirTy);
    mlir::Value op0 = adaptor.getOperands()[0];
    if (fromTy == toTy) {
      rewriter.replaceOp(convert, op0);
      return mlir::success();
    }
    auto loc = convert.getLoc();
    auto convertFpToFp = [&](mlir::Value val, unsigned fromBits,
                             unsigned toBits, mlir::Type toTy) -> mlir::Value {
      if (fromBits == toBits) {
        // TODO: Converting between two floating-point representations with the
        // same bitwidth is not allowed for now.
        mlir::emitError(loc,
                        "cannot implicitly convert between two floating-point "
                        "representations of the same bitwidth");
        return {};
      }
      if (fromBits > toBits)
        return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val);
      return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val);
    };
    // Complex to complex conversion.
    if (fir::isa_complex(fromFirTy) && fir::isa_complex(toFirTy)) {
      // Special case: handle the conversion of a complex such that both the
      // real and imaginary parts are converted together.
      auto zero = mlir::ArrayAttr::get(convert.getContext(),
                                       rewriter.getI32IntegerAttr(0));
      auto one = mlir::ArrayAttr::get(convert.getContext(),
                                      rewriter.getI32IntegerAttr(1));
      auto ty = convertType(getComplexEleTy(convert.getValue().getType()));
      auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, zero);
      auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, one);
      auto nt = convertType(getComplexEleTy(convert.getRes().getType()));
      auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
      auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt);
      auto rc = convertFpToFp(rp, fromBits, toBits, nt);
      auto ic = convertFpToFp(ip, fromBits, toBits, nt);
      auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy);
      auto i1 =
          rewriter.create<mlir::LLVM::InsertValueOp>(loc, toTy, un, rc, zero);
      rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, toTy, i1,
                                                             ic, one);
      return mlir::success();
    }

    // Follow UNIX F77 convention for logicals:
    // 1. underlying integer is not zero => logical is .TRUE.
    // 2. logical is .TRUE. => set underlying integer to 1.
    auto i1Type = mlir::IntegerType::get(convert.getContext(), 1);
    if (fromFirTy.isa<fir::LogicalType>() && toFirTy == i1Type) {
      mlir::Value zero = genConstantIndex(loc, fromTy, rewriter, 0);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
          convert, mlir::LLVM::ICmpPredicate::ne, op0, zero);
      return mlir::success();
    }
    if (fromFirTy == i1Type && toFirTy.isa<fir::LogicalType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ZExtOp>(convert, toTy, op0);
      return mlir::success();
    }

    // Floating point to floating point conversion.
    if (isFloatingPointTy(fromTy)) {
      if (isFloatingPointTy(toTy)) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        auto v = convertFpToFp(op0, fromBits, toBits, toTy);
        rewriter.replaceOp(convert, v);
        return mlir::success();
      }
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::FPToSIOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::IntegerType>()) {
      // Integer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        assert(fromBits != toBits);
        if (fromBits > toBits) {
          rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0);
          return mlir::success();
        }
        rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to floating point conversion.
      if (isFloatingPointTy(toTy)) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::LLVM::LLVMPointerType>()) {
      // Pointer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Pointer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0);
        return mlir::success();
      }
    }
    return emitError(loc) << "cannot convert " << fromTy << " to " << toTy;
  }
};

/// Lower `fir.dispatch` operation. A virtual call to a method in a dispatch
/// table.
struct DispatchOpConversion : public FIROpConversion<fir::DispatchOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DispatchOp dispatch, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dispatch.getLoc(), "fir.dispatch codegen");
    return mlir::failure();
  }
};

/// Lower `fir.dispatch_table` operation. The dispatch table for a Fortran
/// derived type.
struct DispatchTableOpConversion
    : public FIROpConversion<fir::DispatchTableOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DispatchTableOp dispTab, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dispTab.getLoc(), "fir.dispatch_table codegen");
    return mlir::failure();
  }
};

/// Lower `fir.dt_entry` operation. An entry in a dispatch table; binds a
/// method-name to a function.
struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DTEntryOp dtEnt, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dtEnt.getLoc(), "fir.dt_entry codegen");
    return mlir::failure();
  }
};

/// Lower `fir.global_len` operation.
struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(globalLen.getLoc(), "fir.global_len codegen");
    return mlir::failure();
  }
};

/// Lower fir.len_param_index
struct LenParamIndexOpConversion
    : public FIROpConversion<fir::LenParamIndexOp> {
  using FIROpConversion::FIROpConversion;

  // FIXME: this should be specialized by the runtime target
  mlir::LogicalResult
  matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(lenp.getLoc(), "fir.len_param_index codegen");
  }
};

/// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of
/// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element
/// in this struct is a pointer. Its type is determined from `KIND`. The 2nd
/// element is the length of the character buffer (`#n`).
struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    auto *ctx = emboxChar.getContext();

    mlir::Value charBuffer = operands[0];
    mlir::Value charBufferLen = operands[1];

    mlir::Location loc = emboxChar.getLoc();
    mlir::Type llvmStructTy = convertType(emboxChar.getType());
    auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy);

    mlir::Type lenTy =
        llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1];
    mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen);

    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>(
        loc, llvmStructTy, llvmStruct, charBuffer, c0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
        emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1);

    return mlir::success();
  }
};
} // namespace

/// Return the LLVMFuncOp corresponding to the standard malloc call.
static mlir::LLVM::LLVMFuncOp
getMalloc(fir::AllocMemOp op, mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  if (mlir::LLVM::LLVMFuncOp mallocFunc =
          module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("malloc"))
    return mallocFunc;
  mlir::OpBuilder moduleBuilder(
      op->getParentOfType<mlir::ModuleOp>().getBodyRegion());
  auto indexType = mlir::IntegerType::get(op.getContext(), 64);
  return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
      rewriter.getUnknownLoc(), "malloc",
      mlir::LLVM::LLVMFunctionType::get(getVoidPtrType(op.getContext()),
                                        indexType,
                                        /*isVarArg=*/false));
}

/// Helper function for generating the LLVM IR that computes the size
/// in bytes for a derived type.
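/// This uses the getelementptr-on-null idiom: compute the address one element
/// past a null pointer of the type and convert that address to an integer.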
static mlir::Value
computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy,
                       mlir::ConversionPatternRewriter &rewriter) {
  auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
  mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
  llvm::SmallVector<mlir::Value> args{one};
  auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args);
  return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep);
}

namespace {
/// Lower a `fir.allocmem` instruction into `llvm.call @malloc`
struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto heapTy = heap.getType();
    auto ty = convertType(heapTy);
    mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter);
    mlir::Location loc = heap.getLoc();
    auto ity = lowerTy().indexType();
    auto dataTy = fir::unwrapRefType(heapTy);
    if (fir::isRecordWithTypeParameters(fir::unwrapSequenceType(dataTy)))
      TODO(loc, "fir.allocmem codegen of derived type with length parameters");
    mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty);
    if (auto scaleSize = genAllocationScaleSize(heap, ity, rewriter))
      size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
    for (mlir::Value opnd : adaptor.getOperands())
      size = rewriter.create<mlir::LLVM::MulOp>(
          loc, ity, size, integerCast(loc, rewriter, ity, opnd));
    heap->setAttr("callee", mlir::SymbolRefAttr::get(mallocFunc));
    auto malloc = rewriter.create<mlir::LLVM::CallOp>(
        loc, ::getVoidPtrType(heap.getContext()), size, heap->getAttrs());
    rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(heap, ty,
                                                       malloc.getResult(0));
    return mlir::success();
  }

  // Compute the (allocation) size of the allocmem type in bytes.
  mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy,
                                 mlir::ConversionPatternRewriter &rewriter,
                                 mlir::Type llTy) const {
    // Use the primitive size, if available.
    auto ptrTy = llTy.dyn_cast<mlir::LLVM::LLVMPointerType>();
    if (auto size =
            mlir::LLVM::getPrimitiveTypeSizeInBits(ptrTy.getElementType()))
      return genConstantIndex(loc, idxTy, rewriter, size / 8);

    // Otherwise, generate the GEP trick in LLVM IR to compute the size.
    return computeDerivedTypeSize(loc, ptrTy, idxTy, rewriter);
  }
};
} // namespace

/// Return the LLVMFuncOp corresponding to the standard free call.
static mlir::LLVM::LLVMFuncOp
getFree(fir::FreeMemOp op, mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  if (mlir::LLVM::LLVMFuncOp freeFunc =
          module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("free"))
    return freeFunc;
  mlir::OpBuilder moduleBuilder(module.getBodyRegion());
  auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext());
  return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
      rewriter.getUnknownLoc(), "free",
      mlir::LLVM::LLVMFunctionType::get(voidType,
                                        getVoidPtrType(op.getContext()),
                                        /*isVarArg=*/false));
}

namespace {
/// Lower a `fir.freemem` instruction into `llvm.call @free`
struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::LLVM::LLVMFuncOp freeFunc = getFree(freemem, rewriter);
    mlir::Location loc = freemem.getLoc();
    auto bitcast = rewriter.create<mlir::LLVM::BitcastOp>(
        freemem.getLoc(), voidPtrTy(), adaptor.getOperands()[0]);
    freemem->setAttr("callee", mlir::SymbolRefAttr::get(freeFunc));
    rewriter.create<mlir::LLVM::CallOp>(
        loc, mlir::TypeRange{}, mlir::ValueRange{bitcast}, freemem->getAttrs());
    rewriter.eraseOp(freemem);
    return mlir::success();
  }
};
} // namespace

namespace {} // namespace

/// Common base class for embox to descriptor conversion.
template <typename OP>
struct EmboxCommonConversion : public FIROpConversion<OP> {
  using FIROpConversion<OP>::FIROpConversion;

  // Find the LLVMFuncOp in whose entry block the alloca should be inserted.
  // The order to find the LLVMFuncOp is as follows:
  // 1. The parent operation of the current block if it is an LLVMFuncOp.
  // 2. The first ancestor that is an LLVMFuncOp.
  mlir::LLVM::LLVMFuncOp
  getFuncForAllocaInsert(mlir::ConversionPatternRewriter &rewriter) const {
    mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
    return mlir::isa<mlir::LLVM::LLVMFuncOp>(parentOp)
               ? mlir::cast<mlir::LLVM::LLVMFuncOp>(parentOp)
               : parentOp->getParentOfType<mlir::LLVM::LLVMFuncOp>();
  }

  // Generate an alloca of size 1 and type \p toTy.
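  // The insertion point is temporarily moved to the start of the enclosing
  // LLVMFuncOp's entry block so the alloca is emitted there.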
  mlir::LLVM::AllocaOp
  genAllocaWithType(mlir::Location loc, mlir::Type toTy, unsigned alignment,
                    mlir::ConversionPatternRewriter &rewriter) const {
    auto thisPt = rewriter.saveInsertionPoint();
    mlir::LLVM::LLVMFuncOp func = getFuncForAllocaInsert(rewriter);
    rewriter.setInsertionPointToStart(&func.front());
    auto size = this->genI32Constant(loc, rewriter, 1);
    auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, toTy, size, alignment);
    rewriter.restoreInsertionPoint(thisPt);
    return al;
  }

  static int getCFIAttr(fir::BoxType boxTy) {
    auto eleTy = boxTy.getEleTy();
    if (eleTy.isa<fir::PointerType>())
      return CFI_attribute_pointer;
    if (eleTy.isa<fir::HeapType>())
      return CFI_attribute_allocatable;
    return CFI_attribute_other;
  }

  static fir::RecordType unwrapIfDerived(fir::BoxType boxTy) {
    return fir::unwrapSequenceType(fir::dyn_cast_ptrOrBoxEleTy(boxTy))
        .template dyn_cast<fir::RecordType>();
  }
  static bool isDerivedTypeWithLenParams(fir::BoxType boxTy) {
    auto recTy = unwrapIfDerived(boxTy);
    return recTy && recTy.getNumLenParams() > 0;
  }
  static bool isDerivedType(fir::BoxType boxTy) {
    return unwrapIfDerived(boxTy) != nullptr;
  }

  // Get the element size and CFI type code of the boxed value.
  std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode(
      mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
      mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const {
    auto doInteger =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::integerBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doLogical =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::logicalBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doFloat = [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::realBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doComplex =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      auto typeCode = fir::complexBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8 * 2),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doCharacter =
        [&](unsigned width,
            mlir::Value len) -> std::tuple<mlir::Value, mlir::Value> {
      auto typeCode = fir::characterBitsToTypeCode(width);
      auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode);
      if (width == 8)
        return {len, typeCodeVal};
      auto byteWidth = this->genConstantOffset(loc, rewriter, width / 8);
      auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64);
      auto size =
          rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len);
      return {size, typeCodeVal};
    };
    auto getKindMap = [&]() -> fir::KindMapping & {
      return this->lowerTy().getKindMap();
    };
    // Pointer-like types.
    if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy))
      boxEleTy = eleTy;
    // Integer types.
    if (fir::isa_integer(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::IntegerType>())
        return doInteger(ty.getWidth());
      auto ty = boxEleTy.cast<fir::IntegerType>();
      return doInteger(getKindMap().getIntegerBitsize(ty.getFKind()));
    }
    // Floating point types.
    if (fir::isa_real(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::FloatType>())
        return doFloat(ty.getWidth());
      auto ty = boxEleTy.cast<fir::RealType>();
      return doFloat(getKindMap().getRealBitsize(ty.getFKind()));
    }
    // Complex types.
    if (fir::isa_complex(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::ComplexType>())
        return doComplex(
            ty.getElementType().cast<mlir::FloatType>().getWidth());
      auto ty = boxEleTy.cast<fir::ComplexType>();
      return doComplex(getKindMap().getRealBitsize(ty.getFKind()));
    }
    // Character types.
    if (auto ty = boxEleTy.dyn_cast<fir::CharacterType>()) {
      auto charWidth = getKindMap().getCharacterBitsize(ty.getFKind());
      if (ty.getLen() != fir::CharacterType::unknownLen()) {
        auto len = this->genConstantOffset(loc, rewriter, ty.getLen());
        return doCharacter(charWidth, len);
      }
      assert(!lenParams.empty());
      return doCharacter(charWidth, lenParams.back());
    }
    // Logical type.
    if (auto ty = boxEleTy.dyn_cast<fir::LogicalType>())
      return doLogical(getKindMap().getLogicalBitsize(ty.getFKind()));
    // Array types.
    if (auto seqTy = boxEleTy.dyn_cast<fir::SequenceType>())
      return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams);
    // Derived-type types.
    if (boxEleTy.isa<fir::RecordType>()) {
      auto ptrTy = mlir::LLVM::LLVMPointerType::get(
          this->lowerTy().convertType(boxEleTy));
      auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
      auto one =
          genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1);
      auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr,
                                                    mlir::ValueRange{one});
      auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>(
          loc, this->lowerTy().indexType(), gep);
      return {eleSize,
              this->genConstantOffset(loc, rewriter, fir::derivedToTypeCode())};
    }
    // Reference type.
    if (fir::isa_ref_type(boxEleTy)) {
      // FIXME: use the target pointer size rather than sizeof(void*)
      return {this->genConstantOffset(loc, rewriter, sizeof(void *)),
              this->genConstantOffset(loc, rewriter, CFI_type_cptr)};
    }
    fir::emitFatalError(loc, "unhandled type in fir.box code generation");
  }

  /// Basic pattern to write a field in the descriptor
  mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter,
                          mlir::Location loc, mlir::Value dest,
                          llvm::ArrayRef<unsigned> fldIndexes,
                          mlir::Value value, bool bitcast = false) const {
    auto boxTy = dest.getType();
    auto fldTy = this->getBoxEleTy(boxTy, fldIndexes);
    if (bitcast)
      value = rewriter.create<mlir::LLVM::BitcastOp>(loc, fldTy, value);
    else
      value = this->integerCast(loc, rewriter, fldTy, value);
    llvm::SmallVector<mlir::Attribute, 2> attrs;
    for (auto i : fldIndexes)
      attrs.push_back(rewriter.getI32IntegerAttr(i));
    auto indexesAttr = mlir::ArrayAttr::get(rewriter.getContext(), attrs);
    return rewriter.create<mlir::LLVM::InsertValueOp>(loc, boxTy, dest, value,
                                                      indexesAttr);
  }

  inline mlir::Value
  insertBaseAddress(mlir::ConversionPatternRewriter &rewriter,
                    mlir::Location loc, mlir::Value dest,
                    mlir::Value base) const {
    return insertField(rewriter, loc, dest, {kAddrPosInBox}, base,
                       /*bitCast=*/true);
  }

  inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter,
                                      mlir::Location loc, mlir::Value dest,
                                      unsigned dim, mlir::Value lb) const {
    return insertField(rewriter, loc, dest,
                       {kDimsPosInBox, dim, kDimLowerBoundPos}, lb);
  }

  inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter,
                                  mlir::Location loc, mlir::Value dest,
                                  unsigned dim, mlir::Value extent) const {
    return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos},
                       extent);
  }

  inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter,
                                  mlir::Location loc, mlir::Value dest,
                                  unsigned dim, mlir::Value stride) const {
    return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos},
                       stride);
  }

  /// Get the address of the type descriptor global variable that was created
  /// by lowering for derived type \p recType.
  template <typename BOX>
  mlir::Value
  getTypeDescriptor(BOX box, mlir::ConversionPatternRewriter &rewriter,
                    mlir::Location loc, fir::RecordType recType) const {
    std::string name =
        fir::NameUniquer::getTypeDescriptorName(recType.getName());
    auto module = box->template getParentOfType<mlir::ModuleOp>();
    if (auto global = module.template lookupSymbol<fir::GlobalOp>(name)) {
      auto ty = mlir::LLVM::LLVMPointerType::get(
          this->lowerTy().convertType(global.getType()));
      return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
                                                      global.getSymName());
    }
    if (auto global =
            module.template lookupSymbol<mlir::LLVM::GlobalOp>(name)) {
      // The global may have already been translated to LLVM.
      auto ty = mlir::LLVM::LLVMPointerType::get(global.getType());
      return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
                                                      global.getSymName());
    }
    // Type info derived types do not have type descriptors since they are the
    // types defining type descriptors.
1315 if (!this->options.ignoreMissingTypeDescriptors && 1316 !fir::NameUniquer::belongsToModule( 1317 name, Fortran::semantics::typeInfoBuiltinModule)) 1318 fir::emitFatalError( 1319 loc, "runtime derived type info descriptor was not generated"); 1320 return rewriter.create<mlir::LLVM::NullOp>( 1321 loc, ::getVoidPtrType(box.getContext())); 1322 } 1323 1324 template <typename BOX> 1325 std::tuple<fir::BoxType, mlir::Value, mlir::Value> 1326 consDescriptorPrefix(BOX box, mlir::ConversionPatternRewriter &rewriter, 1327 unsigned rank, mlir::ValueRange lenParams) const { 1328 auto loc = box.getLoc(); 1329 auto boxTy = box.getType().template dyn_cast<fir::BoxType>(); 1330 auto convTy = this->lowerTy().convertBoxType(boxTy, rank); 1331 auto llvmBoxPtrTy = convTy.template cast<mlir::LLVM::LLVMPointerType>(); 1332 auto llvmBoxTy = llvmBoxPtrTy.getElementType(); 1333 mlir::Value descriptor = 1334 rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy); 1335 1336 llvm::SmallVector<mlir::Value> typeparams = lenParams; 1337 if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) { 1338 if (!box.substr().empty() && fir::hasDynamicSize(boxTy.getEleTy())) 1339 typeparams.push_back(box.substr()[1]); 1340 } 1341 1342 // Write each of the fields with the appropriate values 1343 auto [eleSize, cfiTy] = 1344 getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams); 1345 descriptor = 1346 insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize); 1347 descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox}, 1348 this->genI32Constant(loc, rewriter, CFI_VERSION)); 1349 descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox}, 1350 this->genI32Constant(loc, rewriter, rank)); 1351 descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy); 1352 descriptor = 1353 insertField(rewriter, loc, descriptor, {kAttributePosInBox}, 1354 this->genI32Constant(loc, rewriter, getCFIAttr(boxTy))); 1355 const bool hasAddendum = isDerivedType(boxTy); 1356 descriptor = 1357 insertField(rewriter, loc, descriptor, {kF18AddendumPosInBox}, 1358 this->genI32Constant(loc, rewriter, hasAddendum ? 1 : 0)); 1359 1360 if (hasAddendum) { 1361 auto isArray = 1362 fir::dyn_cast_ptrOrBoxEleTy(boxTy).template isa<fir::SequenceType>(); 1363 unsigned typeDescFieldId = isArray ? kOptTypePtrPosInBox : kDimsPosInBox; 1364 auto typeDesc = 1365 getTypeDescriptor(box, rewriter, loc, unwrapIfDerived(boxTy)); 1366 descriptor = 1367 insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc, 1368 /*bitCast=*/true); 1369 } 1370 1371 return {boxTy, descriptor, eleSize}; 1372 } 1373 1374 /// Compute the base address of a substring given the base address of a scalar 1375 /// string and the zero based string lower bound. 1376 mlir::Value shiftSubstringBase(mlir::ConversionPatternRewriter &rewriter, 1377 mlir::Location loc, mlir::Value base, 1378 mlir::Value lowerBound) const { 1379 llvm::SmallVector<mlir::Value> gepOperands; 1380 auto baseType = 1381 base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType(); 1382 if (baseType.isa<mlir::LLVM::LLVMArrayType>()) { 1383 auto idxTy = this->lowerTy().indexType(); 1384 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 1385 gepOperands.push_back(zero); 1386 } 1387 gepOperands.push_back(lowerBound); 1388 return this->genGEP(loc, base.getType(), rewriter, base, gepOperands); 1389 } 1390 1391 /// If the embox is not in a globalOp body, allocate storage for the box; 1392 /// store the value inside and return the generated alloca. 
Return the input value otherwise.
  mlir::Value
  placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter,
                               mlir::Location loc, mlir::Value boxValue) const {
    auto *thisBlock = rewriter.getInsertionBlock();
    if (thisBlock && mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp()))
      return boxValue;
    auto boxPtrTy = mlir::LLVM::LLVMPointerType::get(boxValue.getType());
    auto alloca = genAllocaWithType(loc, boxPtrTy, defaultAlign, rewriter);
    rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, alloca);
    return alloca;
  }
};

/// Compute the extent of a triplet slice (lb:ub:step), i.e.
/// max((ub - lb + step) / step, 0).
static mlir::Value
computeTripletExtent(mlir::ConversionPatternRewriter &rewriter,
                     mlir::Location loc, mlir::Value lb, mlir::Value ub,
                     mlir::Value step, mlir::Value zero, mlir::Type type) {
  mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb);
  extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step);
  extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step);
  // If the resulting extent is negative (`ub-lb` and `step` have different
  // signs), zero must be returned instead.
  auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
      loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero);
  return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero);
}

/// Create a generic box on a memory reference. This conversion lowers the
/// abstract box to the appropriate, initialized descriptor.
struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> {
  using EmboxCommonConversion::EmboxCommonConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    assert(!embox.getShape() && "There should be no dims on this embox op");
    auto [boxTy, dest, eleSize] =
        consDescriptorPrefix(embox, rewriter, /*rank=*/0,
                             /*lenParams=*/adaptor.getOperands().drop_front(1));
    dest = insertBaseAddress(rewriter, embox.getLoc(), dest,
                             adaptor.getOperands()[0]);
    if (isDerivedTypeWithLenParams(boxTy)) {
      TODO(embox.getLoc(),
           "fir.embox codegen of derived with length parameters");
      return mlir::failure();
    }
    auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest);
    rewriter.replaceOp(embox, result);
    return mlir::success();
  }
};

/// Create a generic box on a memory reference.
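/// As a rough illustration (the numbers are made up, not taken from a real
/// test): boxing the section `a(2:10:2)` of a rank-1 array with 4-byte
/// elements yields a descriptor whose element length is 4, rank is 1, extent
/// is (10-2+2)/2 = 5, and byte stride is 2*4 = 8, with the base address
/// advanced to the first selected element.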
1448 struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> { 1449 using EmboxCommonConversion::EmboxCommonConversion; 1450 1451 mlir::LogicalResult 1452 matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor, 1453 mlir::ConversionPatternRewriter &rewriter) const override { 1454 auto [boxTy, dest, eleSize] = consDescriptorPrefix( 1455 xbox, rewriter, xbox.getOutRank(), 1456 adaptor.getOperands().drop_front(xbox.lenParamOffset())); 1457 // Generate the triples in the dims field of the descriptor 1458 mlir::ValueRange operands = adaptor.getOperands(); 1459 auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64); 1460 mlir::Value base = operands[0]; 1461 assert(!xbox.shape().empty() && "must have a shape"); 1462 unsigned shapeOffset = xbox.shapeOffset(); 1463 bool hasShift = !xbox.shift().empty(); 1464 unsigned shiftOffset = xbox.shiftOffset(); 1465 bool hasSlice = !xbox.slice().empty(); 1466 unsigned sliceOffset = xbox.sliceOffset(); 1467 mlir::Location loc = xbox.getLoc(); 1468 mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0); 1469 mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1); 1470 mlir::Value prevDim = integerCast(loc, rewriter, i64Ty, eleSize); 1471 mlir::Value prevPtrOff = one; 1472 mlir::Type eleTy = boxTy.getEleTy(); 1473 const unsigned rank = xbox.getRank(); 1474 llvm::SmallVector<mlir::Value> gepArgs; 1475 unsigned constRows = 0; 1476 mlir::Value ptrOffset = zero; 1477 if (auto memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType())) 1478 if (auto seqTy = memEleTy.dyn_cast<fir::SequenceType>()) { 1479 mlir::Type seqEleTy = seqTy.getEleTy(); 1480 // Adjust the element scaling factor if the element is a dependent type. 1481 if (fir::hasDynamicSize(seqEleTy)) { 1482 if (fir::isa_char(seqEleTy)) { 1483 assert(xbox.lenParams().size() == 1); 1484 prevPtrOff = integerCast(loc, rewriter, i64Ty, 1485 operands[xbox.lenParamOffset()]); 1486 } else if (seqEleTy.isa<fir::RecordType>()) { 1487 TODO(loc, "generate call to calculate size of PDT"); 1488 } else { 1489 return rewriter.notifyMatchFailure(xbox, "unexpected dynamic type"); 1490 } 1491 } else { 1492 constRows = seqTy.getConstantRows(); 1493 } 1494 } 1495 1496 bool hasSubcomp = !xbox.subcomponent().empty(); 1497 if (!xbox.substr().empty()) 1498 TODO(loc, "codegen of fir.embox with substring"); 1499 1500 mlir::Value stepExpr; 1501 if (hasSubcomp) { 1502 // We have a subcomponent. The step value needs to be the number of 1503 // bytes per element (which is a derived type). 1504 mlir::Type ty0 = base.getType(); 1505 [[maybe_unused]] auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 1506 assert(ptrTy && "expected pointer type"); 1507 mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()); 1508 assert(memEleTy && "expected fir pointer type"); 1509 auto seqTy = memEleTy.dyn_cast<fir::SequenceType>(); 1510 assert(seqTy && "expected sequence type"); 1511 mlir::Type seqEleTy = seqTy.getEleTy(); 1512 auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy)); 1513 stepExpr = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter); 1514 } 1515 1516 // Process the array subspace arguments (shape, shift, etc.), if any, 1517 // translating everything to values in the descriptor wherever the entity 1518 // has a dynamic array dimension. 
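    // Illustrative numbers (not from a real test): with 4-byte elements and a
    // slice step of 3 in some dimension, the loop below records a descriptor
    // extent of (ub-lb+step)/step and a byte stride of 3*4 = 12 for that
    // dimension, then scales the running stride by the full, unsliced extent
    // before moving on to the next dimension.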
    for (unsigned di = 0, descIdx = 0; di < rank; ++di) {
      mlir::Value extent = operands[shapeOffset];
      mlir::Value outerExtent = extent;
      bool skipNext = false;
      if (hasSlice) {
        mlir::Value off = operands[sliceOffset];
        mlir::Value adj = one;
        if (hasShift)
          adj = operands[shiftOffset];
        auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj);
        if (constRows > 0) {
          gepArgs.push_back(ao);
        } else {
          auto dimOff =
              rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff);
          ptrOffset =
              rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset);
        }
        if (mlir::isa_and_nonnull<fir::UndefOp>(
                xbox.slice()[3 * di + 1].getDefiningOp())) {
          // This dimension contains a scalar expression in the array slice op.
          // The dimension is loop invariant, will be dropped, and will not
          // appear in the descriptor.
          skipNext = true;
        }
      }
      if (!skipNext) {
        if (hasSlice)
          extent = computeTripletExtent(rewriter, loc, operands[sliceOffset],
                                        operands[sliceOffset + 1],
                                        operands[sliceOffset + 2], zero, i64Ty);
        // Store the lower bound (normally 0) for BIND(C) interoperability.
        mlir::Value lb = zero;
        const bool isaPointerOrAllocatable =
            eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>();
        // The lower bound defaults to 1 for POINTER, ALLOCATABLE, and
        // denormalized descriptors.
        if (isaPointerOrAllocatable || !normalizedLowerBound(xbox)) {
          lb = one;
          // If there is a shifted origin, and no fir.slice, and this is not
          // a normalized descriptor, then use the value from the shift op as
          // the lower bound.
          if (hasShift && !(hasSlice || hasSubcomp)) {
            lb = operands[shiftOffset];
            auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>(
                loc, mlir::LLVM::ICmpPredicate::eq, extent, zero);
            lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one,
                                                       lb);
          }
        }
        dest = insertLowerBound(rewriter, loc, dest, descIdx, lb);

        dest = insertExtent(rewriter, loc, dest, descIdx, extent);

        // Store the step (scaled by the shaped extent).
        mlir::Value step = hasSubcomp ? stepExpr : prevDim;
        if (hasSlice)
          step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step,
                                                    operands[sliceOffset + 2]);
        dest = insertStride(rewriter, loc, dest, descIdx, step);
        ++descIdx;
      }

      // Compute the stride and offset for the next natural dimension.
      prevDim =
          rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevDim, outerExtent);
      if (constRows == 0)
        prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff,
                                                        outerExtent);
      else
        --constRows;

      // Increment the iterators.
      ++shapeOffset;
      if (hasShift)
        ++shiftOffset;
      if (hasSlice)
        sliceOffset += 3;
    }
    if (hasSlice || hasSubcomp || !xbox.substr().empty()) {
      llvm::SmallVector<mlir::Value> args = {ptrOffset};
      args.append(gepArgs.rbegin(), gepArgs.rend());
      if (hasSubcomp) {
        // For each field in the path, add the offset to the base via the args
        // list. In the most general case, some offsets must be computed since
        // they are not known until runtime.
1606 if (fir::hasDynamicSize(fir::unwrapSequenceType( 1607 fir::unwrapPassByRefType(xbox.memref().getType())))) 1608 TODO(loc, "fir.embox codegen dynamic size component in derived type"); 1609 args.append(operands.begin() + xbox.subcomponentOffset(), 1610 operands.begin() + xbox.subcomponentOffset() + 1611 xbox.subcomponent().size()); 1612 } 1613 base = 1614 rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args); 1615 if (!xbox.substr().empty()) 1616 base = shiftSubstringBase(rewriter, loc, base, 1617 operands[xbox.substrOffset()]); 1618 } 1619 dest = insertBaseAddress(rewriter, loc, dest, base); 1620 if (isDerivedTypeWithLenParams(boxTy)) 1621 TODO(loc, "fir.embox codegen of derived with length parameters"); 1622 1623 mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest); 1624 rewriter.replaceOp(xbox, result); 1625 return mlir::success(); 1626 } 1627 1628 /// Return true if `xbox` has a normalized lower bounds attribute. A box value 1629 /// that is neither a POINTER nor an ALLOCATABLE should be normalized to a 1630 /// zero origin lower bound for interoperability with BIND(C). 1631 inline static bool normalizedLowerBound(fir::cg::XEmboxOp xbox) { 1632 return xbox->hasAttr(fir::getNormalizedLowerBoundAttrName()); 1633 } 1634 }; 1635 1636 /// Create a new box given a box reference. 1637 struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> { 1638 using EmboxCommonConversion::EmboxCommonConversion; 1639 1640 mlir::LogicalResult 1641 matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor, 1642 mlir::ConversionPatternRewriter &rewriter) const override { 1643 mlir::Location loc = rebox.getLoc(); 1644 mlir::Type idxTy = lowerTy().indexType(); 1645 mlir::Value loweredBox = adaptor.getOperands()[0]; 1646 mlir::ValueRange operands = adaptor.getOperands(); 1647 1648 // Create new descriptor and fill its non-shape related data. 
1649 llvm::SmallVector<mlir::Value, 2> lenParams; 1650 mlir::Type inputEleTy = getInputEleTy(rebox); 1651 if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) { 1652 mlir::Value len = 1653 loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter); 1654 if (charTy.getFKind() != 1) { 1655 mlir::Value width = 1656 genConstantIndex(loc, idxTy, rewriter, charTy.getFKind()); 1657 len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width); 1658 } 1659 lenParams.emplace_back(len); 1660 } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) { 1661 if (recTy.getNumLenParams() != 0) 1662 TODO(loc, "reboxing descriptor of derived type with length parameters"); 1663 } 1664 auto [boxTy, dest, eleSize] = 1665 consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams); 1666 1667 // Read input extents, strides, and base address 1668 llvm::SmallVector<mlir::Value> inputExtents; 1669 llvm::SmallVector<mlir::Value> inputStrides; 1670 const unsigned inputRank = rebox.getRank(); 1671 for (unsigned i = 0; i < inputRank; ++i) { 1672 mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i); 1673 llvm::SmallVector<mlir::Value, 3> dimInfo = 1674 getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter); 1675 inputExtents.emplace_back(dimInfo[1]); 1676 inputStrides.emplace_back(dimInfo[2]); 1677 } 1678 1679 mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType()); 1680 mlir::Value baseAddr = 1681 loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter); 1682 1683 if (!rebox.slice().empty() || !rebox.subcomponent().empty()) 1684 return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1685 operands, rewriter); 1686 return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1687 operands, rewriter); 1688 } 1689 1690 private: 1691 /// Write resulting shape and base address in descriptor, and replace rebox 1692 /// op. 1693 mlir::LogicalResult 1694 finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1695 mlir::ValueRange lbounds, mlir::ValueRange extents, 1696 mlir::ValueRange strides, 1697 mlir::ConversionPatternRewriter &rewriter) const { 1698 mlir::Location loc = rebox.getLoc(); 1699 mlir::Value zero = 1700 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 1701 mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1); 1702 for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) { 1703 mlir::Value extent = std::get<0>(iter.value()); 1704 unsigned dim = iter.index(); 1705 mlir::Value lb = one; 1706 if (!lbounds.empty()) { 1707 lb = lbounds[dim]; 1708 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1709 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1710 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, lb); 1711 }; 1712 dest = insertLowerBound(rewriter, loc, dest, dim, lb); 1713 dest = insertExtent(rewriter, loc, dest, dim, extent); 1714 dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value())); 1715 } 1716 dest = insertBaseAddress(rewriter, loc, dest, base); 1717 mlir::Value result = 1718 placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest); 1719 rewriter.replaceOp(rebox, result); 1720 return mlir::success(); 1721 } 1722 1723 // Apply slice given the base address, extents and strides of the input box. 
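  // For example (illustrative values only): reboxing `x(2:, :)` when the
  // declared lower bound (shift) is 1 and the input byte stride of dimension 1
  // is 4 advances the base address by (2-1)*4 = 4 bytes before the new extents
  // and strides are computed.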
1724 mlir::LogicalResult 1725 sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1726 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1727 mlir::ValueRange operands, 1728 mlir::ConversionPatternRewriter &rewriter) const { 1729 mlir::Location loc = rebox.getLoc(); 1730 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1731 mlir::Type idxTy = lowerTy().indexType(); 1732 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 1733 // Apply subcomponent and substring shift on base address. 1734 if (!rebox.subcomponent().empty() || !rebox.substr().empty()) { 1735 // Cast to inputEleTy* so that a GEP can be used. 1736 mlir::Type inputEleTy = getInputEleTy(rebox); 1737 auto llvmElePtrTy = 1738 mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy)); 1739 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base); 1740 1741 if (!rebox.subcomponent().empty()) { 1742 llvm::SmallVector<mlir::Value> gepOperands = {zero}; 1743 for (unsigned i = 0; i < rebox.subcomponent().size(); ++i) 1744 gepOperands.push_back(operands[rebox.subcomponentOffset() + i]); 1745 base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands); 1746 } 1747 if (!rebox.substr().empty()) 1748 base = shiftSubstringBase(rewriter, loc, base, 1749 operands[rebox.substrOffset()]); 1750 } 1751 1752 if (rebox.slice().empty()) 1753 // The array section is of the form array[%component][substring], keep 1754 // the input array extents and strides. 1755 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1756 inputExtents, inputStrides, rewriter); 1757 1758 // Strides from the fir.box are in bytes. 1759 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1760 1761 // The slice is of the form array(i:j:k)[%component]. Compute new extents 1762 // and strides. 1763 llvm::SmallVector<mlir::Value> slicedExtents; 1764 llvm::SmallVector<mlir::Value> slicedStrides; 1765 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 1766 const bool sliceHasOrigins = !rebox.shift().empty(); 1767 unsigned sliceOps = rebox.sliceOffset(); 1768 unsigned shiftOps = rebox.shiftOffset(); 1769 auto strideOps = inputStrides.begin(); 1770 const unsigned inputRank = inputStrides.size(); 1771 for (unsigned i = 0; i < inputRank; 1772 ++i, ++strideOps, ++shiftOps, sliceOps += 3) { 1773 mlir::Value sliceLb = 1774 integerCast(loc, rewriter, idxTy, operands[sliceOps]); 1775 mlir::Value inputStride = *strideOps; // already idxTy 1776 // Apply origin shift: base += (lb-shift)*input_stride 1777 mlir::Value sliceOrigin = 1778 sliceHasOrigins 1779 ? integerCast(loc, rewriter, idxTy, operands[shiftOps]) 1780 : one; 1781 mlir::Value diff = 1782 rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin); 1783 mlir::Value offset = 1784 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride); 1785 base = genGEP(loc, voidPtrTy, rewriter, base, offset); 1786 // Apply upper bound and step if this is a triplet. Otherwise, the 1787 // dimension is dropped and no extents/strides are computed. 
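      // Illustrative numbers (not from a real test): a triplet 2:10:2 over an
      // input byte stride of 8 keeps the dimension with extent
      // (10-2+2)/2 = 5 and a new byte stride of 2*8 = 16; a scalar index in
      // that position would instead drop the dimension entirely.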
1788 mlir::Value upper = operands[sliceOps + 1]; 1789 const bool isTripletSlice = 1790 !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp()); 1791 if (isTripletSlice) { 1792 mlir::Value step = 1793 integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]); 1794 // extent = ub-lb+step/step 1795 mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper); 1796 mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb, 1797 sliceUb, step, zero, idxTy); 1798 slicedExtents.emplace_back(extent); 1799 // stride = step*input_stride 1800 mlir::Value stride = 1801 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride); 1802 slicedStrides.emplace_back(stride); 1803 } 1804 } 1805 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1806 slicedExtents, slicedStrides, rewriter); 1807 } 1808 1809 /// Apply a new shape to the data described by a box given the base address, 1810 /// extents and strides of the box. 1811 mlir::LogicalResult 1812 reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1813 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1814 mlir::ValueRange operands, 1815 mlir::ConversionPatternRewriter &rewriter) const { 1816 mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(), 1817 operands.begin() + rebox.shiftOffset() + 1818 rebox.shift().size()}; 1819 if (rebox.shape().empty()) { 1820 // Only setting new lower bounds. 1821 return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents, 1822 inputStrides, rewriter); 1823 } 1824 1825 mlir::Location loc = rebox.getLoc(); 1826 // Strides from the fir.box are in bytes. 1827 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1828 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1829 1830 llvm::SmallVector<mlir::Value> newStrides; 1831 llvm::SmallVector<mlir::Value> newExtents; 1832 mlir::Type idxTy = lowerTy().indexType(); 1833 // First stride from input box is kept. The rest is assumed contiguous 1834 // (it is not possible to reshape otherwise). If the input is scalar, 1835 // which may be OK if all new extents are ones, the stride does not 1836 // matter, use one. 1837 mlir::Value stride = inputStrides.empty() 1838 ? genConstantIndex(loc, idxTy, rewriter, 1) 1839 : inputStrides[0]; 1840 for (unsigned i = 0; i < rebox.shape().size(); ++i) { 1841 mlir::Value rawExtent = operands[rebox.shapeOffset() + i]; 1842 mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent); 1843 newExtents.emplace_back(extent); 1844 newStrides.emplace_back(stride); 1845 // nextStride = extent * stride; 1846 stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride); 1847 } 1848 return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides, 1849 rewriter); 1850 } 1851 1852 /// Return scalar element type of the input box. 1853 static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) { 1854 auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType()); 1855 if (auto seqTy = ty.dyn_cast<fir::SequenceType>()) 1856 return seqTy.getEleTy(); 1857 return ty; 1858 } 1859 }; 1860 1861 /// Lower `fir.emboxproc` operation. Creates a procedure box. 1862 /// TODO: Part of supporting Fortran 2003 procedure pointers. 
1863 struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> { 1864 using FIROpConversion::FIROpConversion; 1865 1866 mlir::LogicalResult 1867 matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor, 1868 mlir::ConversionPatternRewriter &rewriter) const override { 1869 TODO(emboxproc.getLoc(), "fir.emboxproc codegen"); 1870 return mlir::failure(); 1871 } 1872 }; 1873 1874 // Code shared between insert_value and extract_value Ops. 1875 struct ValueOpCommon { 1876 // Translate the arguments pertaining to any multidimensional array to 1877 // row-major order for LLVM-IR. 1878 static void toRowMajor(llvm::SmallVectorImpl<mlir::Attribute> &attrs, 1879 mlir::Type ty) { 1880 assert(ty && "type is null"); 1881 const auto end = attrs.size(); 1882 for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) { 1883 if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 1884 const auto dim = getDimension(seq); 1885 if (dim > 1) { 1886 auto ub = std::min(i + dim, end); 1887 std::reverse(attrs.begin() + i, attrs.begin() + ub); 1888 i += dim - 1; 1889 } 1890 ty = getArrayElementType(seq); 1891 } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) { 1892 ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()]; 1893 } else { 1894 llvm_unreachable("index into invalid type"); 1895 } 1896 } 1897 } 1898 1899 static llvm::SmallVector<mlir::Attribute> 1900 collectIndices(mlir::ConversionPatternRewriter &rewriter, 1901 mlir::ArrayAttr arrAttr) { 1902 llvm::SmallVector<mlir::Attribute> attrs; 1903 for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) { 1904 if (i->isa<mlir::IntegerAttr>()) { 1905 attrs.push_back(*i); 1906 } else { 1907 auto fieldName = i->cast<mlir::StringAttr>().getValue(); 1908 ++i; 1909 auto ty = i->cast<mlir::TypeAttr>().getValue(); 1910 auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName); 1911 attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index)); 1912 } 1913 } 1914 return attrs; 1915 } 1916 1917 private: 1918 static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) { 1919 unsigned result = 1; 1920 for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>(); 1921 eleTy; 1922 eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>()) 1923 ++result; 1924 return result; 1925 } 1926 1927 static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) { 1928 auto eleTy = ty.getElementType(); 1929 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 1930 eleTy = arrTy.getElementType(); 1931 return eleTy; 1932 } 1933 }; 1934 1935 namespace { 1936 /// Extract a subobject value from an ssa-value of aggregate type 1937 struct ExtractValueOpConversion 1938 : public FIROpAndTypeConversion<fir::ExtractValueOp>, 1939 public ValueOpCommon { 1940 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1941 1942 mlir::LogicalResult 1943 doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor, 1944 mlir::ConversionPatternRewriter &rewriter) const override { 1945 auto attrs = collectIndices(rewriter, extractVal.getCoor()); 1946 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 1947 auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs); 1948 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>( 1949 extractVal, ty, adaptor.getOperands()[0], position); 1950 return mlir::success(); 1951 } 1952 }; 1953 1954 /// InsertValue is the generalized instruction for the composition of new 1955 /// aggregate type values. 
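/// As an illustrative sketch (coordinates invented for the example): FIR
/// arrays are column-major while LLVM aggregates are indexed row-major, so an
/// insertion at FIR coordinates (i, j) of a two-dimensional array is emitted
/// with the index pair reversed to (j, i) via ValueOpCommon::toRowMajor.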
1956 struct InsertValueOpConversion 1957 : public FIROpAndTypeConversion<fir::InsertValueOp>, 1958 public ValueOpCommon { 1959 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1960 1961 mlir::LogicalResult 1962 doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor, 1963 mlir::ConversionPatternRewriter &rewriter) const override { 1964 auto attrs = collectIndices(rewriter, insertVal.getCoor()); 1965 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 1966 auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs); 1967 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 1968 insertVal, ty, adaptor.getOperands()[0], adaptor.getOperands()[1], 1969 position); 1970 return mlir::success(); 1971 } 1972 }; 1973 1974 /// InsertOnRange inserts a value into a sequence over a range of offsets. 1975 struct InsertOnRangeOpConversion 1976 : public FIROpAndTypeConversion<fir::InsertOnRangeOp> { 1977 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1978 1979 // Increments an array of subscripts in a row major fasion. 1980 void incrementSubscripts(const llvm::SmallVector<uint64_t> &dims, 1981 llvm::SmallVector<uint64_t> &subscripts) const { 1982 for (size_t i = dims.size(); i > 0; --i) { 1983 if (++subscripts[i - 1] < dims[i - 1]) { 1984 return; 1985 } 1986 subscripts[i - 1] = 0; 1987 } 1988 } 1989 1990 mlir::LogicalResult 1991 doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor, 1992 mlir::ConversionPatternRewriter &rewriter) const override { 1993 1994 llvm::SmallVector<uint64_t> dims; 1995 auto type = adaptor.getOperands()[0].getType(); 1996 1997 // Iteratively extract the array dimensions from the type. 1998 while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 1999 dims.push_back(t.getNumElements()); 2000 type = t.getElementType(); 2001 } 2002 2003 llvm::SmallVector<uint64_t> lBounds; 2004 llvm::SmallVector<uint64_t> uBounds; 2005 2006 // Unzip the upper and lower bound and convert to a row major format. 2007 mlir::DenseIntElementsAttr coor = range.getCoor(); 2008 auto reversedCoor = llvm::reverse(coor.getValues<int64_t>()); 2009 for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) { 2010 uBounds.push_back(*i++); 2011 lBounds.push_back(*i); 2012 } 2013 2014 auto &subscripts = lBounds; 2015 auto loc = range.getLoc(); 2016 mlir::Value lastOp = adaptor.getOperands()[0]; 2017 mlir::Value insertVal = adaptor.getOperands()[1]; 2018 2019 auto i64Ty = rewriter.getI64Type(); 2020 while (subscripts != uBounds) { 2021 // Convert uint64_t's to Attribute's. 2022 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2023 for (const auto &subscript : subscripts) 2024 subscriptAttrs.push_back(mlir::IntegerAttr::get(i64Ty, subscript)); 2025 lastOp = rewriter.create<mlir::LLVM::InsertValueOp>( 2026 loc, ty, lastOp, insertVal, 2027 mlir::ArrayAttr::get(range.getContext(), subscriptAttrs)); 2028 2029 incrementSubscripts(dims, subscripts); 2030 } 2031 2032 // Convert uint64_t's to Attribute's. 
2033 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2034 for (const auto &subscript : subscripts) 2035 subscriptAttrs.push_back( 2036 mlir::IntegerAttr::get(rewriter.getI64Type(), subscript)); 2037 mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs); 2038 2039 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 2040 range, ty, lastOp, insertVal, 2041 mlir::ArrayAttr::get(range.getContext(), arrayRef)); 2042 2043 return mlir::success(); 2044 } 2045 }; 2046 } // namespace 2047 2048 namespace { 2049 /// XArrayCoor is the address arithmetic on a dynamically shaped, sliced, 2050 /// shifted etc. array. 2051 /// (See the static restriction on coordinate_of.) array_coor determines the 2052 /// coordinate (location) of a specific element. 2053 struct XArrayCoorOpConversion 2054 : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> { 2055 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2056 2057 mlir::LogicalResult 2058 doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor, 2059 mlir::ConversionPatternRewriter &rewriter) const override { 2060 auto loc = coor.getLoc(); 2061 mlir::ValueRange operands = adaptor.getOperands(); 2062 unsigned rank = coor.getRank(); 2063 assert(coor.indices().size() == rank); 2064 assert(coor.shape().empty() || coor.shape().size() == rank); 2065 assert(coor.shift().empty() || coor.shift().size() == rank); 2066 assert(coor.slice().empty() || coor.slice().size() == 3 * rank); 2067 mlir::Type idxTy = lowerTy().indexType(); 2068 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2069 mlir::Value prevExt = one; 2070 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2071 mlir::Value offset = zero; 2072 const bool isShifted = !coor.shift().empty(); 2073 const bool isSliced = !coor.slice().empty(); 2074 const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>(); 2075 2076 auto indexOps = coor.indices().begin(); 2077 auto shapeOps = coor.shape().begin(); 2078 auto shiftOps = coor.shift().begin(); 2079 auto sliceOps = coor.slice().begin(); 2080 // For each dimension of the array, generate the offset calculation. 2081 for (unsigned i = 0; i < rank; 2082 ++i, ++indexOps, ++shapeOps, ++shiftOps, sliceOps += 3) { 2083 mlir::Value index = 2084 integerCast(loc, rewriter, idxTy, operands[coor.indicesOffset() + i]); 2085 mlir::Value lb = isShifted ? integerCast(loc, rewriter, idxTy, 2086 operands[coor.shiftOffset() + i]) 2087 : one; 2088 mlir::Value step = one; 2089 bool normalSlice = isSliced; 2090 // Compute zero based index in dimension i of the element, applying 2091 // potential triplets and lower bounds. 2092 if (isSliced) { 2093 mlir::Value ub = *(sliceOps + 1); 2094 normalSlice = !mlir::isa_and_nonnull<fir::UndefOp>(ub.getDefiningOp()); 2095 if (normalSlice) 2096 step = integerCast(loc, rewriter, idxTy, *(sliceOps + 2)); 2097 } 2098 auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb); 2099 mlir::Value diff = 2100 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step); 2101 if (normalSlice) { 2102 mlir::Value sliceLb = 2103 integerCast(loc, rewriter, idxTy, operands[coor.sliceOffset() + i]); 2104 auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb); 2105 diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj); 2106 } 2107 // Update the offset given the stride and the zero based index `diff` 2108 // that was just computed. 2109 if (baseIsBoxed) { 2110 // Use stride in bytes from the descriptor. 
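        // Illustrative numbers (not from a real test): a descriptor byte
        // stride of 12 and a zero-based index difference of 2 contribute
        // 2*12 = 24 bytes to the running offset.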
2111 mlir::Value stride = 2112 loadStrideFromBox(loc, adaptor.getOperands()[0], i, rewriter); 2113 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride); 2114 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2115 } else { 2116 // Use stride computed at last iteration. 2117 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt); 2118 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2119 // Compute next stride assuming contiguity of the base array 2120 // (in element number). 2121 auto nextExt = 2122 integerCast(loc, rewriter, idxTy, operands[coor.shapeOffset() + i]); 2123 prevExt = 2124 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt); 2125 } 2126 } 2127 2128 // Add computed offset to the base address. 2129 if (baseIsBoxed) { 2130 // Working with byte offsets. The base address is read from the fir.box. 2131 // and need to be casted to i8* to do the pointer arithmetic. 2132 mlir::Type baseTy = 2133 getBaseAddrTypeFromBox(adaptor.getOperands()[0].getType()); 2134 mlir::Value base = 2135 loadBaseAddrFromBox(loc, baseTy, adaptor.getOperands()[0], rewriter); 2136 mlir::Type voidPtrTy = getVoidPtrType(); 2137 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2138 llvm::SmallVector<mlir::Value> args{offset}; 2139 auto addr = 2140 rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args); 2141 if (coor.subcomponent().empty()) { 2142 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, baseTy, addr); 2143 return mlir::success(); 2144 } 2145 auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr); 2146 args.clear(); 2147 args.push_back(zero); 2148 if (!coor.lenParams().empty()) { 2149 // If type parameters are present, then we don't want to use a GEPOp 2150 // as below, as the LLVM struct type cannot be statically defined. 2151 TODO(loc, "derived type with type parameters"); 2152 } 2153 // TODO: array offset subcomponents must be converted to LLVM's 2154 // row-major layout here. 2155 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2156 args.push_back(operands[i]); 2157 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, baseTy, casted, 2158 args); 2159 return mlir::success(); 2160 } 2161 2162 // The array was not boxed, so it must be contiguous. offset is therefore an 2163 // element offset and the base type is kept in the GEP unless the element 2164 // type size is itself dynamic. 2165 mlir::Value base; 2166 if (coor.subcomponent().empty()) { 2167 // No subcomponent. 2168 if (!coor.lenParams().empty()) { 2169 // Type parameters. Adjust element size explicitly. 2170 auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType()); 2171 assert(eleTy && "result must be a reference-like type"); 2172 if (fir::characterWithDynamicLen(eleTy)) { 2173 assert(coor.lenParams().size() == 1); 2174 auto bitsInChar = lowerTy().getKindMap().getCharacterBitsize( 2175 eleTy.cast<fir::CharacterType>().getFKind()); 2176 auto scaling = genConstantIndex(loc, idxTy, rewriter, bitsInChar / 8); 2177 auto scaledBySize = 2178 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, scaling); 2179 auto length = 2180 integerCast(loc, rewriter, idxTy, 2181 adaptor.getOperands()[coor.lenParamsOffset()]); 2182 offset = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, scaledBySize, 2183 length); 2184 } else { 2185 TODO(loc, "compute size of derived type with type parameters"); 2186 } 2187 } 2188 // Cast the base address to a pointer to T. 
2189 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty, 2190 adaptor.getOperands()[0]); 2191 } else { 2192 // Operand #0 must have a pointer type. For subcomponent slicing, we 2193 // want to cast away the array type and have a plain struct type. 2194 mlir::Type ty0 = adaptor.getOperands()[0].getType(); 2195 auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 2196 assert(ptrTy && "expected pointer type"); 2197 mlir::Type eleTy = ptrTy.getElementType(); 2198 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2199 eleTy = arrTy.getElementType(); 2200 auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy); 2201 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy, 2202 adaptor.getOperands()[0]); 2203 } 2204 llvm::SmallVector<mlir::Value> args = {offset}; 2205 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2206 args.push_back(operands[i]); 2207 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args); 2208 return mlir::success(); 2209 } 2210 }; 2211 } // namespace 2212 2213 /// Convert to (memory) reference to a reference to a subobject. 2214 /// The coordinate_of op is a Swiss army knife operation that can be used on 2215 /// (memory) references to records, arrays, complex, etc. as well as boxes. 2216 /// With unboxed arrays, there is the restriction that the array have a static 2217 /// shape in all but the last column. 2218 struct CoordinateOpConversion 2219 : public FIROpAndTypeConversion<fir::CoordinateOp> { 2220 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2221 2222 mlir::LogicalResult 2223 doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor, 2224 mlir::ConversionPatternRewriter &rewriter) const override { 2225 mlir::ValueRange operands = adaptor.getOperands(); 2226 2227 mlir::Location loc = coor.getLoc(); 2228 mlir::Value base = operands[0]; 2229 mlir::Type baseObjectTy = coor.getBaseType(); 2230 mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2231 assert(objectTy && "fir.coordinate_of expects a reference type"); 2232 2233 // Complex type - basically, extract the real or imaginary part 2234 if (fir::isa_complex(objectTy)) { 2235 mlir::LLVM::ConstantOp c0 = 2236 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2237 llvm::SmallVector<mlir::Value> offs = {c0, operands[1]}; 2238 mlir::Value gep = genGEP(loc, ty, rewriter, base, offs); 2239 rewriter.replaceOp(coor, gep); 2240 return mlir::success(); 2241 } 2242 2243 // Boxed type - get the base pointer from the box 2244 if (baseObjectTy.dyn_cast<fir::BoxType>()) 2245 return doRewriteBox(coor, ty, operands, loc, rewriter); 2246 2247 // Reference, pointer or a heap type 2248 if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType, fir::HeapType>()) 2249 return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter); 2250 2251 return rewriter.notifyMatchFailure( 2252 coor, "fir.coordinate_of base operand has unsupported type"); 2253 } 2254 2255 static unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) { 2256 return fir::hasDynamicSize(ty) 2257 ? 
op.getDefiningOp() 2258 ->getAttrOfType<mlir::IntegerAttr>("field") 2259 .getInt() 2260 : getIntValue(op); 2261 } 2262 2263 static int64_t getIntValue(mlir::Value val) { 2264 assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value"); 2265 mlir::Operation *defop = val.getDefiningOp(); 2266 2267 if (auto constOp = mlir::dyn_cast<mlir::arith::ConstantIntOp>(defop)) 2268 return constOp.value(); 2269 if (auto llConstOp = mlir::dyn_cast<mlir::LLVM::ConstantOp>(defop)) 2270 if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>()) 2271 return attr.getValue().getSExtValue(); 2272 fir::emitFatalError(val.getLoc(), "must be a constant"); 2273 } 2274 2275 static bool hasSubDimensions(mlir::Type type) { 2276 return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>(); 2277 } 2278 2279 /// Check whether this form of `!fir.coordinate_of` is supported. These 2280 /// additional checks are required, because we are not yet able to convert 2281 /// all valid forms of `!fir.coordinate_of`. 2282 /// TODO: Either implement the unsupported cases or extend the verifier 2283 /// in FIROps.cpp instead. 2284 static bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) { 2285 const std::size_t numOfCoors = coors.size(); 2286 std::size_t i = 0; 2287 bool subEle = false; 2288 bool ptrEle = false; 2289 for (; i < numOfCoors; ++i) { 2290 mlir::Value nxtOpnd = coors[i]; 2291 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2292 subEle = true; 2293 i += arrTy.getDimension() - 1; 2294 type = arrTy.getEleTy(); 2295 } else if (auto recTy = type.dyn_cast<fir::RecordType>()) { 2296 subEle = true; 2297 type = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2298 } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) { 2299 subEle = true; 2300 type = tupTy.getType(getIntValue(nxtOpnd)); 2301 } else { 2302 ptrEle = true; 2303 } 2304 } 2305 if (ptrEle) 2306 return (!subEle) && (numOfCoors == 1); 2307 return subEle && (i >= numOfCoors); 2308 } 2309 2310 /// Walk the abstract memory layout and determine if the path traverses any 2311 /// array types with unknown shape. Return true iff all the array types have a 2312 /// constant shape along the path. 2313 static bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) { 2314 for (std::size_t i = 0, sz = coors.size(); i < sz; ++i) { 2315 mlir::Value nxtOpnd = coors[i]; 2316 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2317 if (fir::sequenceWithNonConstantShape(arrTy)) 2318 return false; 2319 i += arrTy.getDimension() - 1; 2320 type = arrTy.getEleTy(); 2321 } else if (auto strTy = type.dyn_cast<fir::RecordType>()) { 2322 type = strTy.getType(getFieldNumber(strTy, nxtOpnd)); 2323 } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) { 2324 type = strTy.getType(getIntValue(nxtOpnd)); 2325 } else { 2326 return true; 2327 } 2328 } 2329 return true; 2330 } 2331 2332 private: 2333 mlir::LogicalResult 2334 doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands, 2335 mlir::Location loc, 2336 mlir::ConversionPatternRewriter &rewriter) const { 2337 mlir::Type boxObjTy = coor.getBaseType(); 2338 assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`"); 2339 2340 mlir::Value boxBaseAddr = operands[0]; 2341 2342 // 1. SPECIAL CASE (uses `fir.len_param_index`): 2343 // %box = ... 
: !fir.box<!fir.type<derived{len1:i32}>>
    //   %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}>
    //   %addr = coordinate_of %box, %lenp
    if (coor.getNumOperands() == 2) {
      mlir::Operation *coordinateDef =
          (*coor.getCoor().begin()).getDefiningOp();
      if (mlir::isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef))
        TODO(loc,
             "fir.coordinate_of - fir.len_param_index is not supported yet");
    }

    // 2. GENERAL CASE:
    // 2.1. (`fir.array`)
    //   %box = ... : !fir.box<!fir.array<?xU>>
    //   %idx = ... : index
    //   %resultAddr = coordinate_of %box, %idx : !fir.ref<U>
    // 2.2. (`fir.derived`)
    //   %box = ... : !fir.box<!fir.type<derived_type{field_1:i32}>>
    //   %idx = ... : i32
    //   %resultAddr = coordinate_of %box, %idx : !fir.ref<i32>
    // 2.3. (`fir.derived` inside `fir.array`)
    //   %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32,
    //     field_2:f32}>>>
    //   %idx1 = ... : index
    //   %idx2 = ... : i32
    //   %resultAddr = coordinate_of %box, %idx1, %idx2 : !fir.ref<f32>
    // 2.4. TODO: Either document or disable any other case that the following
    //   implementation might convert.
    mlir::LLVM::ConstantOp c0 =
        genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
    mlir::Value resultAddr =
        loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()),
                            boxBaseAddr, rewriter);
    // Component type.
    auto cpnTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy);
    mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext());

    for (unsigned i = 1, last = operands.size(); i < last; ++i) {
      if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) {
        if (i != 1)
          TODO(loc, "fir.array nested inside other array and/or derived type");
        // Apply the byte strides from the box. Ignore the lower bound from the
        // box since fir.coordinate_of indexes are zero based. Lowering takes
        // care of lower bound aspects. This both accounts for dynamically
        // sized types and non-contiguous arrays.
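        // Illustrative numbers (not from the sources): for a
        // !fir.box<!fir.array<?x?xi32>> whose descriptor reports byte strides
        // {4, 400}, zero-based indexes (i, j) contribute i*4 + j*400 bytes,
        // which the loop below adds to the base address with an i8* GEP.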
2386 auto idxTy = lowerTy().indexType(); 2387 mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0); 2388 for (unsigned index = i, lastIndex = i + arrTy.getDimension(); 2389 index < lastIndex; ++index) { 2390 mlir::Value stride = 2391 loadStrideFromBox(loc, operands[0], index - i, rewriter); 2392 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, 2393 operands[index], stride); 2394 off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off); 2395 } 2396 auto voidPtrBase = 2397 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr); 2398 llvm::SmallVector<mlir::Value> args{off}; 2399 resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, 2400 voidPtrBase, args); 2401 i += arrTy.getDimension() - 1; 2402 cpnTy = arrTy.getEleTy(); 2403 } else if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) { 2404 auto recRefTy = 2405 mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy)); 2406 mlir::Value nxtOpnd = operands[i]; 2407 auto memObj = 2408 rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr); 2409 llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd}; 2410 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2411 auto llvmCurrentObjTy = lowerTy().convertType(cpnTy); 2412 auto gep = rewriter.create<mlir::LLVM::GEPOp>( 2413 loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj, 2414 args); 2415 resultAddr = 2416 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep); 2417 } else { 2418 fir::emitFatalError(loc, "unexpected type in coordinate_of"); 2419 } 2420 } 2421 2422 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr); 2423 return mlir::success(); 2424 } 2425 2426 mlir::LogicalResult 2427 doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty, 2428 mlir::ValueRange operands, mlir::Location loc, 2429 mlir::ConversionPatternRewriter &rewriter) const { 2430 mlir::Type baseObjectTy = coor.getBaseType(); 2431 2432 // Component Type 2433 mlir::Type cpnTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2434 bool hasSubdimension = hasSubDimensions(cpnTy); 2435 bool columnIsDeferred = !hasSubdimension; 2436 2437 if (!supportedCoordinate(cpnTy, operands.drop_front(1))) 2438 TODO(loc, "unsupported combination of coordinate operands"); 2439 2440 const bool hasKnownShape = 2441 arraysHaveKnownShape(cpnTy, operands.drop_front(1)); 2442 2443 // If only the column is `?`, then we can simply place the column value in 2444 // the 0-th GEP position. 
2445 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2446 if (!hasKnownShape) { 2447 const unsigned sz = arrTy.getDimension(); 2448 if (arraysHaveKnownShape(arrTy.getEleTy(), 2449 operands.drop_front(1 + sz))) { 2450 fir::SequenceType::ShapeRef shape = arrTy.getShape(); 2451 bool allConst = true; 2452 for (unsigned i = 0; i < sz - 1; ++i) { 2453 if (shape[i] < 0) { 2454 allConst = false; 2455 break; 2456 } 2457 } 2458 if (allConst) 2459 columnIsDeferred = true; 2460 } 2461 } 2462 } 2463 2464 if (fir::hasDynamicSize(fir::unwrapSequenceType(cpnTy))) 2465 return mlir::emitError( 2466 loc, "fir.coordinate_of with a dynamic element size is unsupported"); 2467 2468 if (hasKnownShape || columnIsDeferred) { 2469 llvm::SmallVector<mlir::Value> offs; 2470 if (hasKnownShape && hasSubdimension) { 2471 mlir::LLVM::ConstantOp c0 = 2472 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2473 offs.push_back(c0); 2474 } 2475 llvm::Optional<int> dims; 2476 llvm::SmallVector<mlir::Value> arrIdx; 2477 for (std::size_t i = 1, sz = operands.size(); i < sz; ++i) { 2478 mlir::Value nxtOpnd = operands[i]; 2479 2480 if (!cpnTy) 2481 return mlir::emitError(loc, "invalid coordinate/check failed"); 2482 2483 // check if the i-th coordinate relates to an array 2484 if (dims.hasValue()) { 2485 arrIdx.push_back(nxtOpnd); 2486 int dimsLeft = *dims; 2487 if (dimsLeft > 1) { 2488 dims = dimsLeft - 1; 2489 continue; 2490 } 2491 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2492 // append array range in reverse (FIR arrays are column-major) 2493 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2494 arrIdx.clear(); 2495 dims.reset(); 2496 continue; 2497 } 2498 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2499 int d = arrTy.getDimension() - 1; 2500 if (d > 0) { 2501 dims = d; 2502 arrIdx.push_back(nxtOpnd); 2503 continue; 2504 } 2505 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2506 offs.push_back(nxtOpnd); 2507 continue; 2508 } 2509 2510 // check if the i-th coordinate relates to a field 2511 if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) 2512 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2513 else if (auto tupTy = cpnTy.dyn_cast<mlir::TupleType>()) 2514 cpnTy = tupTy.getType(getIntValue(nxtOpnd)); 2515 else 2516 cpnTy = nullptr; 2517 2518 offs.push_back(nxtOpnd); 2519 } 2520 if (dims.hasValue()) 2521 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2522 mlir::Value base = operands[0]; 2523 mlir::Value retval = genGEP(loc, ty, rewriter, base, offs); 2524 rewriter.replaceOp(coor, retval); 2525 return mlir::success(); 2526 } 2527 2528 return mlir::emitError( 2529 loc, "fir.coordinate_of base operand has unsupported type"); 2530 } 2531 }; 2532 2533 /// Convert `fir.field_index`. The conversion depends on whether the size of 2534 /// the record is static or dynamic. 2535 struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> { 2536 using FIROpConversion::FIROpConversion; 2537 2538 // NB: most field references should be resolved by this point 2539 mlir::LogicalResult 2540 matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor, 2541 mlir::ConversionPatternRewriter &rewriter) const override { 2542 auto recTy = field.getOnType().cast<fir::RecordType>(); 2543 unsigned index = recTy.getFieldIndex(field.getFieldId()); 2544 2545 if (!fir::hasDynamicSize(recTy)) { 2546 // Derived type has compile-time constant layout. Return index of the 2547 // component type in the parent type (to be used in GEP). 
      rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset(
                                    field.getLoc(), rewriter, index)});
      return mlir::success();
    }

    // The derived type's layout is not a compile-time constant. Call the
    // compiler generated function to determine the byte offset of the field
    // at runtime. This returns a non-constant value.
    mlir::FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get(
        field.getContext(), getOffsetMethodName(recTy, field.getFieldId()));
    mlir::NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr);
    mlir::NamedAttribute fieldAttr = rewriter.getNamedAttr(
        "field", mlir::IntegerAttr::get(lowerTy().indexType(), index));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        field, lowerTy().offsetType(), adaptor.getOperands(),
        llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr});
    return mlir::success();
  }

  // Reconstruct the name of the compiler generated method that computes the
  // field offset.
  inline static std::string getOffsetMethodName(fir::RecordType recTy,
                                                llvm::StringRef field) {
    return recTy.getName().str() + "P." + field.str() + ".offset";
  }
};

/// Convert `fir.end`.
struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(firEnd.getLoc(), "fir.end codegen");
    return mlir::failure();
  }
};

/// Lower `fir.gentypedesc` to a global constant.
struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen");
    return mlir::failure();
  }
};

/// Lower the `fir.has_value` operation to an `llvm.return` operation.
struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<mlir::LLVM::ReturnOp>(op,
                                                      adaptor.getOperands());
    return mlir::success();
  }
};

/// Lower the `fir.global` operation to an `llvm.global` operation.
/// `fir.insert_on_range` operations are replaced with a constant dense
/// attribute if they are applied on the full range.
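/// As an illustrative example (not taken from a real test), initializing a
/// global of type !fir.array<3xi32> with a single fir.insert_on_range covering
/// indexes [0, 2] can be folded into a dense elements attribute over
/// vector<3xi32>, because every coordinate pair spans 0 to extent-1.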
struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto tyAttr = convertType(global.getType());
    if (global.getType().isa<fir::BoxType>())
      tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType();
    auto loc = global.getLoc();
    mlir::Attribute initAttr{};
    if (global.getInitVal())
      initAttr = global.getInitVal().getValue();
    auto linkage = convertLinkage(global.getLinkName());
    auto isConst = global.getConstant().hasValue();
    auto g = rewriter.create<mlir::LLVM::GlobalOp>(
        loc, tyAttr, isConst, linkage, global.getSymName(), initAttr);
    auto &gr = g.getInitializerRegion();
    rewriter.inlineRegionBefore(global.getRegion(), gr, gr.end());
    if (!gr.empty()) {
      // Replace insert_on_range with a constant dense attribute if the
      // initialization is on the full range.
      auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>();
      for (auto insertOp : insertOnRangeOps) {
        if (isFullRange(insertOp.getCoor(), insertOp.getType())) {
          auto seqTyAttr = convertType(insertOp.getType());
          auto *op = insertOp.getVal().getDefiningOp();
          auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op);
          if (!constant) {
            auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op);
            if (!convertOp)
              continue;
            constant = mlir::cast<mlir::arith::ConstantOp>(
                convertOp.getValue().getDefiningOp());
          }
          mlir::Type vecType = mlir::VectorType::get(
              insertOp.getType().getShape(), constant.getType());
          auto denseAttr = mlir::DenseElementsAttr::get(
              vecType.cast<mlir::ShapedType>(), constant.getValue());
          rewriter.setInsertionPointAfter(insertOp);
          rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>(
              insertOp, seqTyAttr, denseAttr);
        }
      }
    }
    rewriter.eraseOp(global);
    return mlir::success();
  }

  bool isFullRange(mlir::DenseIntElementsAttr indexes,
                   fir::SequenceType seqTy) const {
    auto extents = seqTy.getShape();
    if (indexes.size() / 2 != static_cast<int64_t>(extents.size()))
      return false;
    auto cur_index = indexes.value_begin<int64_t>();
    for (unsigned i = 0; i < indexes.size(); i += 2) {
      if (*(cur_index++) != 0)
        return false;
      if (*(cur_index++) != extents[i / 2] - 1)
        return false;
    }
    return true;
  }

  // TODO: String comparison should be avoided. Replace linkName with an
  // enumeration.
  mlir::LLVM::Linkage
  convertLinkage(llvm::Optional<llvm::StringRef> optLinkage) const {
    if (optLinkage.hasValue()) {
      auto name = optLinkage.getValue();
      if (name == "internal")
        return mlir::LLVM::Linkage::Internal;
      if (name == "linkonce")
        return mlir::LLVM::Linkage::Linkonce;
      if (name == "linkonce_odr")
        return mlir::LLVM::Linkage::LinkonceODR;
      if (name == "common")
        return mlir::LLVM::Linkage::Common;
      if (name == "weak")
        return mlir::LLVM::Linkage::Weak;
    }
    return mlir::LLVM::Linkage::External;
  }
};

/// `fir.load` --> `llvm.load`
struct LoadOpConversion : public FIROpConversion<fir::LoadOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // fir.box is a special case because it is considered an SSA value in FIR,
    // but it is lowered as a pointer to a descriptor. So fir.ref<fir.box>
    // and fir.box end up being the same LLVM type, and loading a
    // fir.ref<fir.box> is actually a no-op in LLVM.
    if (load.getType().isa<fir::BoxType>()) {
      rewriter.replaceOp(load, adaptor.getOperands()[0]);
    } else {
      rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>(
          load, convertType(load.getType()), adaptor.getOperands(),
          load->getAttrs());
    }
    return mlir::success();
  }
};

/// Lower `fir.no_reassoc` to LLVM IR dialect.
/// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast
/// math flags?
struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]);
    return mlir::success();
  }
};

static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest,
                        llvm::Optional<mlir::ValueRange> destOps,
                        mlir::ConversionPatternRewriter &rewriter,
                        mlir::Block *newBlock) {
  if (destOps.hasValue())
    rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(),
                                          newBlock, mlir::ValueRange());
  else
    rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock);
}

template <typename A, typename B>
static void genBrOp(A caseOp, mlir::Block *dest, llvm::Optional<B> destOps,
                    mlir::ConversionPatternRewriter &rewriter) {
  if (destOps.hasValue())
    rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(),
                                                  dest);
  else
    rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest);
}

static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp,
                              mlir::Block *dest,
                              llvm::Optional<mlir::ValueRange> destOps,
                              mlir::ConversionPatternRewriter &rewriter) {
  auto *thisBlock = rewriter.getInsertionBlock();
  auto *newBlock = createBlock(rewriter, dest);
  rewriter.setInsertionPointToEnd(thisBlock);
  genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock);
  rewriter.setInsertionPointToEnd(newBlock);
}

/// Conversion of `fir.select_case`
///
/// The `fir.select_case` operation is converted to an if-then-else ladder.
/// Depending on the case condition type, one or more comparisons and
/// conditional branches may be generated.
///
/// A point value case such as `case(4)`, a lower bound case such as
/// `case(5:)`, or an upper bound case such as `case(:3)` is converted to a
/// single comparison between the selector value and the constant value of the
/// case. The block associated with the case condition is executed if the
/// comparison succeeds; otherwise control branches to the next block, which
/// holds the comparison for the next case condition.
///
/// A closed interval case condition such as `case(7:10)` is converted with a
/// first comparison and conditional branch for the lower bound. If
/// successful, it branches to a second block with the comparison for the
/// upper bound of the same case condition.
///
/// TODO: lowering of CHARACTER type cases is not handled yet.
struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    unsigned conds = caseOp.getNumConditions();
    llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue();
    // Type can be CHARACTER, INTEGER, or LOGICAL (C1145)
    auto ty = caseOp.getSelector().getType();
    if (ty.isa<fir::CharacterType>()) {
      TODO(caseOp.getLoc(), "fir.select_case codegen with character type");
      return mlir::failure();
    }
    mlir::Value selector = caseOp.getSelector(adaptor.getOperands());
    auto loc = caseOp.getLoc();
    for (unsigned t = 0; t != conds; ++t) {
      mlir::Block *dest = caseOp.getSuccessor(t);
      llvm::Optional<mlir::ValueRange> destOps =
          caseOp.getSuccessorOperands(adaptor.getOperands(), t);
      llvm::Optional<mlir::ValueRange> cmpOps =
          *caseOp.getCompareOperands(adaptor.getOperands(), t);
      mlir::Value caseArg = *(cmpOps.getValue().begin());
      mlir::Attribute attr = cases[t];
      if (attr.isa<fir::PointIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::LowerBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::UpperBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::ClosedIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        auto *thisBlock = rewriter.getInsertionBlock();
        auto *newBlock1 = createBlock(rewriter, dest);
        auto *newBlock2 = createBlock(rewriter, dest);
        rewriter.setInsertionPointToEnd(thisBlock);
        rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock1);
        mlir::Value caseArg0 = *(cmpOps.getValue().begin() + 1);
        auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0);
        genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock2);
        continue;
      }
      assert(attr.isa<mlir::UnitAttr>());
      assert((t + 1 == conds) && "unit must be last");
      genBrOp(caseOp, dest, destOps, rewriter);
    }
    return mlir::success();
  }
};

template <typename OP>
static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select,
                                  typename OP::Adaptor adaptor,
                                  mlir::ConversionPatternRewriter &rewriter) {
  unsigned conds = select.getNumConditions();
  auto cases = select.getCases().getValue();
  mlir::Value selector = adaptor.getSelector();
  auto loc = select.getLoc();
  assert(conds > 0 && "select must have cases");

  llvm::SmallVector<mlir::Block *> destinations;
  llvm::SmallVector<mlir::ValueRange> destinationsOperands;
  mlir::Block *defaultDestination;
  mlir::ValueRange defaultOperands;
  llvm::SmallVector<int32_t> caseValues;

  for (unsigned t = 0; t != conds; ++t) {
    mlir::Block *dest = select.getSuccessor(t);
    auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t);
    const mlir::Attribute &attr = cases[t];
    if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) {
      destinations.push_back(dest);
      destinationsOperands.push_back(destOps.hasValue() ? *destOps
                                                        : mlir::ValueRange{});
      caseValues.push_back(intAttr.getInt());
      continue;
    }
    assert(attr.template dyn_cast_or_null<mlir::UnitAttr>());
    assert((t + 1 == conds) && "unit must be last");
    defaultDestination = dest;
    defaultOperands = destOps.hasValue() ? *destOps : mlir::ValueRange{};
  }

  // LLVM::SwitchOp takes an i32 type for the selector.
  if (select.getSelector().getType() != rewriter.getI32Type())
    selector = rewriter.create<mlir::LLVM::TruncOp>(loc, rewriter.getI32Type(),
                                                    selector);

  rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>(
      select, selector,
      /*defaultDestination=*/defaultDestination,
      /*defaultOperands=*/defaultOperands,
      /*caseValues=*/caseValues,
      /*caseDestinations=*/destinations,
      /*caseOperands=*/destinationsOperands,
      /*branchWeights=*/llvm::ArrayRef<std::int32_t>());
}

/// Conversion of `fir.select` to `llvm.switch`
struct SelectOpConversion : public FIROpConversion<fir::SelectOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter);
    return mlir::success();
  }
};

/// Conversion of `fir.select_rank` to `llvm.switch`
struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter);
    return mlir::success();
  }
};

/// Lower `fir.select_type` to LLVM IR dialect.
2926 struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> { 2927 using FIROpConversion::FIROpConversion; 2928 2929 mlir::LogicalResult 2930 matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor, 2931 mlir::ConversionPatternRewriter &rewriter) const override { 2932 mlir::emitError(select.getLoc(), 2933 "fir.select_type should have already been converted"); 2934 return mlir::failure(); 2935 } 2936 }; 2937 2938 /// `fir.store` --> `llvm.store` 2939 struct StoreOpConversion : public FIROpConversion<fir::StoreOp> { 2940 using FIROpConversion::FIROpConversion; 2941 2942 mlir::LogicalResult 2943 matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor, 2944 mlir::ConversionPatternRewriter &rewriter) const override { 2945 if (store.getValue().getType().isa<fir::BoxType>()) { 2946 // fir.box value is actually in memory, load it first before storing it. 2947 mlir::Location loc = store.getLoc(); 2948 mlir::Type boxPtrTy = adaptor.getOperands()[0].getType(); 2949 auto val = rewriter.create<mlir::LLVM::LoadOp>( 2950 loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(), 2951 adaptor.getOperands()[0]); 2952 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2953 store, val, adaptor.getOperands()[1]); 2954 } else { 2955 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2956 store, adaptor.getOperands()[0], adaptor.getOperands()[1]); 2957 } 2958 return mlir::success(); 2959 } 2960 }; 2961 2962 namespace { 2963 2964 /// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for 2965 /// the character buffer and one for the buffer length. 2966 struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> { 2967 using FIROpConversion::FIROpConversion; 2968 2969 mlir::LogicalResult 2970 matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor, 2971 mlir::ConversionPatternRewriter &rewriter) const override { 2972 auto *ctx = unboxchar.getContext(); 2973 2974 mlir::Type lenTy = convertType(unboxchar.getType(1)); 2975 mlir::Value tuple = adaptor.getOperands()[0]; 2976 mlir::Type tupleTy = tuple.getType(); 2977 2978 mlir::Location loc = unboxchar.getLoc(); 2979 mlir::Value ptrToBuffer = 2980 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 0); 2981 2982 mlir::LLVM::ExtractValueOp len = 2983 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 1); 2984 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len); 2985 2986 rewriter.replaceOp(unboxchar, 2987 llvm::ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast}); 2988 return mlir::success(); 2989 } 2990 }; 2991 2992 /// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its 2993 /// components. 2994 /// TODO: Part of supporting Fortran 2003 procedure pointers. 
2995 struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> { 2996 using FIROpConversion::FIROpConversion; 2997 2998 mlir::LogicalResult 2999 matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor, 3000 mlir::ConversionPatternRewriter &rewriter) const override { 3001 TODO(unboxproc.getLoc(), "fir.unboxproc codegen"); 3002 return mlir::failure(); 3003 } 3004 }; 3005 3006 /// convert to LLVM IR dialect `undef` 3007 struct UndefOpConversion : public FIROpConversion<fir::UndefOp> { 3008 using FIROpConversion::FIROpConversion; 3009 3010 mlir::LogicalResult 3011 matchAndRewrite(fir::UndefOp undef, OpAdaptor, 3012 mlir::ConversionPatternRewriter &rewriter) const override { 3013 rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>( 3014 undef, convertType(undef.getType())); 3015 return mlir::success(); 3016 } 3017 }; 3018 3019 struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> { 3020 using FIROpConversion::FIROpConversion; 3021 3022 mlir::LogicalResult 3023 matchAndRewrite(fir::ZeroOp zero, OpAdaptor, 3024 mlir::ConversionPatternRewriter &rewriter) const override { 3025 mlir::Type ty = convertType(zero.getType()); 3026 if (ty.isa<mlir::LLVM::LLVMPointerType>()) { 3027 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty); 3028 } else if (ty.isa<mlir::IntegerType>()) { 3029 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 3030 zero, ty, mlir::IntegerAttr::get(zero.getType(), 0)); 3031 } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) { 3032 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 3033 zero, ty, mlir::FloatAttr::get(zero.getType(), 0.0)); 3034 } else { 3035 // TODO: create ConstantAggregateZero for FIR aggregate/array types. 3036 return rewriter.notifyMatchFailure( 3037 zero, 3038 "conversion of fir.zero with aggregate type not implemented yet"); 3039 } 3040 return mlir::success(); 3041 } 3042 }; 3043 3044 /// `fir.unreachable` --> `llvm.unreachable` 3045 struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> { 3046 using FIROpConversion::FIROpConversion; 3047 3048 mlir::LogicalResult 3049 matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor, 3050 mlir::ConversionPatternRewriter &rewriter) const override { 3051 rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach); 3052 return mlir::success(); 3053 } 3054 }; 3055 3056 /// `fir.is_present` --> 3057 /// ``` 3058 /// %0 = llvm.mlir.constant(0 : i64) 3059 /// %1 = llvm.ptrtoint %0 3060 /// %2 = llvm.icmp "ne" %1, %0 : i64 3061 /// ``` 3062 struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> { 3063 using FIROpConversion::FIROpConversion; 3064 3065 mlir::LogicalResult 3066 matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor, 3067 mlir::ConversionPatternRewriter &rewriter) const override { 3068 mlir::Type idxTy = lowerTy().indexType(); 3069 mlir::Location loc = isPresent.getLoc(); 3070 auto ptr = adaptor.getOperands()[0]; 3071 3072 if (isPresent.getVal().getType().isa<fir::BoxCharType>()) { 3073 auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>(); 3074 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 3075 3076 mlir::Type ty = structTy.getBody()[0]; 3077 mlir::MLIRContext *ctx = isPresent.getContext(); 3078 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3079 ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0); 3080 } 3081 mlir::LLVM::ConstantOp c0 = 3082 genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0); 3083 auto addr = 
rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr); 3084 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 3085 isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0); 3086 3087 return mlir::success(); 3088 } 3089 }; 3090 3091 /// Create value signaling an absent optional argument in a call, e.g. 3092 /// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>` 3093 struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> { 3094 using FIROpConversion::FIROpConversion; 3095 3096 mlir::LogicalResult 3097 matchAndRewrite(fir::AbsentOp absent, OpAdaptor, 3098 mlir::ConversionPatternRewriter &rewriter) const override { 3099 mlir::Type ty = convertType(absent.getType()); 3100 mlir::Location loc = absent.getLoc(); 3101 3102 if (absent.getType().isa<fir::BoxCharType>()) { 3103 auto structTy = ty.cast<mlir::LLVM::LLVMStructType>(); 3104 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 3105 auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3106 auto nullField = 3107 rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]); 3108 mlir::MLIRContext *ctx = absent.getContext(); 3109 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3110 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 3111 absent, ty, undefStruct, nullField, c0); 3112 } else { 3113 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty); 3114 } 3115 return mlir::success(); 3116 } 3117 }; 3118 3119 // 3120 // Primitive operations on Complex types 3121 // 3122 3123 /// Generate inline code for complex addition/subtraction 3124 template <typename LLVMOP, typename OPTY> 3125 static mlir::LLVM::InsertValueOp 3126 complexSum(OPTY sumop, mlir::ValueRange opnds, 3127 mlir::ConversionPatternRewriter &rewriter, 3128 fir::LLVMTypeConverter &lowering) { 3129 mlir::Value a = opnds[0]; 3130 mlir::Value b = opnds[1]; 3131 auto loc = sumop.getLoc(); 3132 auto ctx = sumop.getContext(); 3133 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3134 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3135 mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType())); 3136 mlir::Type ty = lowering.convertType(sumop.getType()); 3137 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3138 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3139 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3140 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3141 auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1); 3142 auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1); 3143 auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3144 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0); 3145 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1); 3146 } 3147 } // namespace 3148 3149 namespace { 3150 struct AddcOpConversion : public FIROpConversion<fir::AddcOp> { 3151 using FIROpConversion::FIROpConversion; 3152 3153 mlir::LogicalResult 3154 matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor, 3155 mlir::ConversionPatternRewriter &rewriter) const override { 3156 // given: (x + iy) + (x' + iy') 3157 // result: (x + x') + i(y + y') 3158 auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(), 3159 rewriter, lowerTy()); 3160 rewriter.replaceOp(addc, r.getResult()); 3161 return mlir::success(); 3162 } 3163 }; 3164 3165 struct SubcOpConversion : public FIROpConversion<fir::SubcOp> { 3166 using 
FIROpConversion::FIROpConversion; 3167 3168 mlir::LogicalResult 3169 matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor, 3170 mlir::ConversionPatternRewriter &rewriter) const override { 3171 // given: (x + iy) - (x' + iy') 3172 // result: (x - x') + i(y - y') 3173 auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(), 3174 rewriter, lowerTy()); 3175 rewriter.replaceOp(subc, r.getResult()); 3176 return mlir::success(); 3177 } 3178 }; 3179 3180 /// Inlined complex multiply 3181 struct MulcOpConversion : public FIROpConversion<fir::MulcOp> { 3182 using FIROpConversion::FIROpConversion; 3183 3184 mlir::LogicalResult 3185 matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor, 3186 mlir::ConversionPatternRewriter &rewriter) const override { 3187 // TODO: Can we use a call to __muldc3 ? 3188 // given: (x + iy) * (x' + iy') 3189 // result: (xx'-yy')+i(xy'+yx') 3190 mlir::Value a = adaptor.getOperands()[0]; 3191 mlir::Value b = adaptor.getOperands()[1]; 3192 auto loc = mulc.getLoc(); 3193 auto *ctx = mulc.getContext(); 3194 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3195 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3196 mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType())); 3197 mlir::Type ty = convertType(mulc.getType()); 3198 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3199 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3200 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3201 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3202 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 3203 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 3204 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 3205 auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx); 3206 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 3207 auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy); 3208 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3209 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 3210 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 3211 rewriter.replaceOp(mulc, r0.getResult()); 3212 return mlir::success(); 3213 } 3214 }; 3215 3216 /// Inlined complex division 3217 struct DivcOpConversion : public FIROpConversion<fir::DivcOp> { 3218 using FIROpConversion::FIROpConversion; 3219 3220 mlir::LogicalResult 3221 matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor, 3222 mlir::ConversionPatternRewriter &rewriter) const override { 3223 // TODO: Can we use a call to __divdc3 instead? 3224 // Just generate inline code for now. 
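    // A quick derivation of the formula used below (plain complex algebra; no
    // Smith-style scaling of the operands is attempted here):
    //   (x + iy) / (x' + iy') = (x + iy)(x' - iy') / ((x' + iy')(x' - iy'))
    //                         = ((xx' + yy') + i(yx' - xy')) / (x'x' + y'y')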
    // given: (x + iy) / (x' + iy')
    // result: ((xx'+yy')/d) + i((yx'-xy')/d) where d = x'x' + y'y'
    mlir::Value a = adaptor.getOperands()[0];
    mlir::Value b = adaptor.getOperands()[1];
    auto loc = divc.getLoc();
    auto *ctx = divc.getContext();
    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    mlir::Type eleTy = convertType(getComplexEleTy(divc.getType()));
    mlir::Type ty = convertType(divc.getType());
    auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
    auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
    auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
    auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
    auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
    auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1);
    auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
    auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
    auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
    auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1);
    auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1);
    auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy);
    auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy);
    auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d);
    auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d);
    auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
    auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
    rewriter.replaceOp(divc, r0.getResult());
    return mlir::success();
  }
};

/// Inlined complex negation
struct NegcOpConversion : public FIROpConversion<fir::NegcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // given: -(x + iy)
    // result: -x - iy
    auto *ctxt = neg.getContext();
    auto eleTy = convertType(getComplexEleTy(neg.getType()));
    auto ty = convertType(neg.getType());
    auto loc = neg.getLoc();
    mlir::Value o0 = adaptor.getOperands()[0];
    auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
    auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0);
    auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1);
    auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp);
    auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip);
    auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1);
    return mlir::success();
  }
};

/// Conversion pattern for operations that must be dead. The information in
/// these operations is consumed by other operations, so they should have no
/// remaining uses by this point.
/// These operations are normally dead after the pre-codegen pass.
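/// As an illustrative example (hypothetical IR, for exposition only): once an
/// array descriptor has been materialized, a shape operand such as
///   %1 = fir.shape %c10 : (index) -> !fir.shape<1>
/// typically has no remaining uses, so the pattern below simply erases it.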
3288 template <typename FromOp> 3289 struct MustBeDeadConversion : public FIROpConversion<FromOp> { 3290 explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering, 3291 const fir::FIRToLLVMPassOptions &options) 3292 : FIROpConversion<FromOp>(lowering, options) {} 3293 using OpAdaptor = typename FromOp::Adaptor; 3294 3295 mlir::LogicalResult 3296 matchAndRewrite(FromOp op, OpAdaptor adaptor, 3297 mlir::ConversionPatternRewriter &rewriter) const final { 3298 if (!op->getUses().empty()) 3299 return rewriter.notifyMatchFailure(op, "op must be dead"); 3300 rewriter.eraseOp(op); 3301 return mlir::success(); 3302 } 3303 }; 3304 3305 struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> { 3306 using MustBeDeadConversion::MustBeDeadConversion; 3307 }; 3308 3309 struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> { 3310 using MustBeDeadConversion::MustBeDeadConversion; 3311 }; 3312 3313 struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> { 3314 using MustBeDeadConversion::MustBeDeadConversion; 3315 }; 3316 3317 struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> { 3318 using MustBeDeadConversion::MustBeDeadConversion; 3319 }; 3320 3321 } // namespace 3322 3323 namespace { 3324 /// Convert FIR dialect to LLVM dialect 3325 /// 3326 /// This pass lowers all FIR dialect operations to LLVM IR dialect. An 3327 /// MLIR pass is used to lower residual Std dialect to LLVM IR dialect. 3328 /// 3329 /// This pass is not complete yet. We are upstreaming it in small patches. 3330 class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> { 3331 public: 3332 FIRToLLVMLowering() = default; 3333 FIRToLLVMLowering(fir::FIRToLLVMPassOptions options) : options{options} {} 3334 mlir::ModuleOp getModule() { return getOperation(); } 3335 3336 void runOnOperation() override final { 3337 auto mod = getModule(); 3338 if (!forcedTargetTriple.empty()) 3339 fir::setTargetTriple(mod, forcedTargetTriple); 3340 3341 auto *context = getModule().getContext(); 3342 fir::LLVMTypeConverter typeConverter{getModule()}; 3343 mlir::RewritePatternSet pattern(context); 3344 pattern.insert< 3345 AbsentOpConversion, AddcOpConversion, AddrOfOpConversion, 3346 AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion, 3347 BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion, 3348 BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion, 3349 BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeDescOpConversion, 3350 CallOpConversion, CmpcOpConversion, ConstcOpConversion, 3351 ConvertOpConversion, CoordinateOpConversion, DispatchOpConversion, 3352 DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion, 3353 EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion, 3354 ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion, 3355 FreeMemOpConversion, GenTypeDescOpConversion, GlobalLenOpConversion, 3356 GlobalOpConversion, HasValueOpConversion, InsertOnRangeOpConversion, 3357 InsertValueOpConversion, IsPresentOpConversion, 3358 LenParamIndexOpConversion, LoadOpConversion, MulcOpConversion, 3359 NegcOpConversion, NoReassocOpConversion, SelectCaseOpConversion, 3360 SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion, 3361 ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion, 3362 SliceOpConversion, StoreOpConversion, StringLitOpConversion, 3363 SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion, 3364 UndefOpConversion, UnreachableOpConversion, 
        XArrayCoorOpConversion, XEmboxOpConversion, XReboxOpConversion,
        ZeroOpConversion>(typeConverter, options);
    mlir::populateFuncToLLVMConversionPatterns(typeConverter, pattern);
    mlir::populateOpenMPToLLVMConversionPatterns(typeConverter, pattern);
    mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter,
                                                            pattern);
    mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter,
                                                          pattern);
    mlir::ConversionTarget target{*context};
    target.addLegalDialect<mlir::LLVM::LLVMDialect>();
    // The OpenMP dialect is legal for operations without regions; operations
    // that contain regions are legal only if those regions contain nothing
    // but the LLVM dialect. Add the OpenMP dialect as a legal dialect for the
    // conversion and legalize the conversion of OpenMP operations without
    // regions.
    mlir::configureOpenMPToLLVMConversionLegality(target, typeConverter);
    target.addLegalDialect<mlir::omp::OpenMPDialect>();

    // Required NOPs for applying a full conversion.
    target.addLegalOp<mlir::ModuleOp>();

    // Apply the patterns.
    if (mlir::failed(mlir::applyFullConversion(getModule(), target,
                                               std::move(pattern)))) {
      signalPassFailure();
    }
  }

private:
  fir::FIRToLLVMPassOptions options;
};

/// Lower from LLVM IR dialect to proper LLVM-IR and dump the module
struct LLVMIRLoweringPass
    : public mlir::PassWrapper<LLVMIRLoweringPass,
                               mlir::OperationPass<mlir::ModuleOp>> {
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(LLVMIRLoweringPass)

  LLVMIRLoweringPass(llvm::raw_ostream &output, fir::LLVMIRLoweringPrinter p)
      : output{output}, printer{p} {}

  mlir::ModuleOp getModule() { return getOperation(); }

  void runOnOperation() override final {
    auto *ctx = getModule().getContext();
    auto optName = getModule().getName();
    llvm::LLVMContext llvmCtx;
    if (auto llvmModule = mlir::translateModuleToLLVMIR(
            getModule(), llvmCtx, optName ? *optName : "FIRModule")) {
      printer(*llvmModule, output);
      return;
    }

    mlir::emitError(mlir::UnknownLoc::get(ctx), "could not emit LLVM-IR\n");
    signalPassFailure();
  }

private:
  llvm::raw_ostream &output;
  fir::LLVMIRLoweringPrinter printer;
};

} // namespace

std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() {
  return std::make_unique<FIRToLLVMLowering>();
}

std::unique_ptr<mlir::Pass>
fir::createFIRToLLVMPass(fir::FIRToLLVMPassOptions options) {
  return std::make_unique<FIRToLLVMLowering>(options);
}

std::unique_ptr<mlir::Pass>
fir::createLLVMDialectToLLVMPass(llvm::raw_ostream &output,
                                 fir::LLVMIRLoweringPrinter printer) {
  return std::make_unique<LLVMIRLoweringPass>(output, printer);
}
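// Example usage (a minimal sketch, not part of this file's API): the passes
// created above are typically scheduled on an mlir::PassManager by the
// driver, e.g.
//   mlir::PassManager pm(&context);
//   pm.addPass(fir::createFIRToLLVMPass());
//   if (mlir::failed(pm.run(module)))
//     /* ...report the failure... */;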