//===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
//
//===----------------------------------------------------------------------===//

#include "flang/Optimizer/CodeGen/CodeGen.h"
#include "CGOps.h"
#include "PassDetail.h"
#include "flang/ISO_Fortran_binding.h"
#include "flang/Optimizer/Dialect/FIRAttr.h"
#include "flang/Optimizer/Dialect/FIROps.h"
#include "flang/Optimizer/Support/InternalNames.h"
#include "flang/Optimizer/Support/TypeCode.h"
#include "flang/Semantics/runtime-type-info.h"
#include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h"
#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h"
#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/OpenMPToLLVM/ConvertOpenMPToLLVM.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Target/LLVMIR/ModuleTranslation.h"
#include "llvm/ADT/ArrayRef.h"

#define DEBUG_TYPE "flang-codegen"

// fir::LLVMTypeConverter for converting to LLVM IR dialect types.
#include "TypeConverter.h"

// TODO: This should really be recovered from the specified target.
static constexpr unsigned defaultAlign = 8;

/// `fir.box` attribute values as defined for CFI_attribute_t in
/// flang/ISO_Fortran_binding.h.
static constexpr unsigned kAttrPointer = CFI_attribute_pointer;
static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable;

static inline mlir::Type getVoidPtrType(mlir::MLIRContext *context) {
  return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8));
}

static mlir::LLVM::ConstantOp
genConstantIndex(mlir::Location loc, mlir::Type ity,
                 mlir::ConversionPatternRewriter &rewriter,
                 std::int64_t offset) {
  auto cattr = rewriter.getI64IntegerAttr(offset);
  return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
}

static mlir::Block *createBlock(mlir::ConversionPatternRewriter &rewriter,
                                mlir::Block *insertBefore) {
  assert(insertBefore && "expected valid insertion block");
  return rewriter.createBlock(insertBefore->getParent(),
                              mlir::Region::iterator(insertBefore));
}

namespace {
/// FIR conversion pattern template
template <typename FromOp>
class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
public:
  explicit FIROpConversion(fir::LLVMTypeConverter &lowering,
                           const fir::FIRToLLVMPassOptions &options)
      : mlir::ConvertOpToLLVMPattern<FromOp>(lowering), options(options) {}

protected:
  mlir::Type convertType(mlir::Type ty) const {
    return lowerTy().convertType(ty);
  }
  mlir::Type voidPtrTy() const { return getVoidPtrType(); }

  mlir::Type getVoidPtrType() const {
    return mlir::LLVM::LLVMPointerType::get(
        mlir::IntegerType::get(&lowerTy().getContext(), 8));
  }

  mlir::LLVM::ConstantOp
  genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
                 int value) const {
    mlir::Type i32Ty = rewriter.getI32Type();
    mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr);
  }

  mlir::LLVM::ConstantOp
  genConstantOffset(mlir::Location loc,
                    mlir::ConversionPatternRewriter &rewriter,
                    int offset) const {
    mlir::Type ity = lowerTy().offsetType();
    mlir::IntegerAttr cattr = rewriter.getI32IntegerAttr(offset);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
  }

  /// Perform an extension or truncation as needed on an integer value.
  /// Lowering to the specific target may involve some sign-extending or
  /// truncation of values, particularly to fit them from abstract box types to
  /// the appropriate reified structures.
  mlir::Value integerCast(mlir::Location loc,
                          mlir::ConversionPatternRewriter &rewriter,
                          mlir::Type ty, mlir::Value val) const {
    auto valTy = val.getType();
    // If the value was not yet lowered, lower its type so that it can
    // be used in getPrimitiveTypeSizeInBits.
    if (!valTy.isa<mlir::IntegerType>())
      valTy = convertType(valTy);
    auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
    auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy);
    if (toSize < fromSize)
      return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
    if (toSize > fromSize)
      return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
    return val;
  }

  /// Construct code sequence to extract the specific value from a `fir.box`.
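  /// For a box lowered to a pointer-to-descriptor struct, the generated
  /// sequence is roughly (an illustrative sketch; exact types depend on the
  /// descriptor layout):
  ///   %c0  = llvm.mlir.constant(0 : i32) : i32
  ///   %pos = llvm.mlir.constant(<boxValue> : i32) : i32
  ///   %p   = llvm.getelementptr %box[%c0, %pos] : ... -> !llvm.ptr<resultTy>
  ///   %v   = llvm.load %p : !llvm.ptr<resultTy>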
  mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box,
                              mlir::Type resultTy,
                              mlir::ConversionPatternRewriter &rewriter,
                              unsigned boxValue) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cValuePos =
        genConstantOffset(loc, rewriter, boxValue);
    auto pty = mlir::LLVM::LLVMPointerType::get(resultTy);
    auto p = rewriter.create<mlir::LLVM::GEPOp>(
        loc, pty, box, mlir::ValueRange{c0, cValuePos});
    return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p);
  }

  /// Method to construct code sequence to get the triple for dimension `dim`
  /// from a box.
  llvm::SmallVector<mlir::Value, 3>
  getDimsFromBox(mlir::Location loc, llvm::ArrayRef<mlir::Type> retTys,
                 mlir::Value box, mlir::Value dim,
                 mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cDims =
        genConstantOffset(loc, rewriter, kDimsPosInBox);
    mlir::LLVM::LoadOp l0 =
        loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter);
    mlir::LLVM::LoadOp l1 =
        loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter);
    mlir::LLVM::LoadOp l2 =
        loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter);
    return {l0.getResult(), l1.getResult(), l2.getResult()};
  }

  mlir::LLVM::LoadOp
  loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0,
                 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off,
                 mlir::Type ty,
                 mlir::ConversionPatternRewriter &rewriter) const {
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  mlir::Value
  loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim,
                    mlir::ConversionPatternRewriter &rewriter) const {
    auto idxTy = lowerTy().indexType();
    auto c0 = genConstantOffset(loc, rewriter, 0);
    auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox);
    auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim);
    return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy,
                          rewriter);
  }

  /// Read base address from a fir.box. Returned address has type ty.
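  /// This is the same GEP-plus-load sequence as getValueFromBox, applied to
  /// the kAddrPosInBox field of the descriptor.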
  mlir::Value
  loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                      mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cAddr =
        genConstantOffset(loc, rewriter, kAddrPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  mlir::Value
  loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                         mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cElemLen =
        genConstantOffset(loc, rewriter, kElemLenPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  // Get the element type given an LLVM type that is of the form
  // [llvm.ptr](array|struct|vector)+ and the provided indexes.
  static mlir::Type getBoxEleTy(mlir::Type type,
                                llvm::ArrayRef<unsigned> indexes) {
    if (auto t = type.dyn_cast<mlir::LLVM::LLVMPointerType>())
      type = t.getElementType();
    for (auto i : indexes) {
      if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) {
        assert(!t.isOpaque() && i < t.getBody().size());
        type = t.getBody()[i];
      } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
        type = t.getElementType();
      } else if (auto t = type.dyn_cast<mlir::VectorType>()) {
        type = t.getElementType();
      } else {
        fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()),
                            "request for invalid box element type");
      }
    }
    return type;
  }

  // Return LLVM type of the base address given the LLVM type
  // of the related descriptor (lowered fir.box type).
  static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) {
    return getBoxEleTy(type, {kAddrPosInBox});
  }

  // Load the attribute from the \p box and perform a check against
  // \p maskValue. The final comparison is implemented as
  // `(attribute & maskValue) != 0`.
  mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box,
                                   mlir::ConversionPatternRewriter &rewriter,
                                   unsigned maskValue) const {
    mlir::Type attrTy = rewriter.getI32Type();
    mlir::Value attribute =
        getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox);
    mlir::LLVM::ConstantOp attrMask =
        genConstantOffset(loc, rewriter, maskValue);
    auto maskRes =
        rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask);
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    return rewriter.create<mlir::LLVM::ICmpOp>(
        loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0);
  }

  template <typename... ARGS>
  mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty,
                           mlir::ConversionPatternRewriter &rewriter,
                           mlir::Value base, ARGS... args) const {
    llvm::SmallVector<mlir::Value> cv{args...};
    return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv);
  }

  fir::LLVMTypeConverter &lowerTy() const {
    return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter());
  }

  const fir::FIRToLLVMPassOptions &options;
};

/// FIR conversion pattern template
template <typename FromOp>
class FIROpAndTypeConversion : public FIROpConversion<FromOp> {
public:
  using FIROpConversion<FromOp>::FIROpConversion;
  using OpAdaptor = typename FromOp::Adaptor;

  mlir::LogicalResult
  matchAndRewrite(FromOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const final {
    mlir::Type ty = this->convertType(op.getType());
    return doRewrite(op, ty, adaptor, rewriter);
  }

  virtual mlir::LogicalResult
  doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const = 0;
};

// Lower `fir.address_of` operation to `llvm.address_of` operation.
struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(addr.getType());
    rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>(
        addr, ty, addr.getSymbol().getRootReference().getValue());
    return mlir::success();
  }
};
} // namespace

/// Lookup the function to compute the memory size of this parametric derived
/// type. The size of the object may depend on the LEN type parameters of the
/// derived type.
static mlir::LLVM::LLVMFuncOp
getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op,
                          mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  std::string name = recTy.getName().str() + "P.mem.size";
  return module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name);
}

namespace {
/// convert to LLVM IR dialect `alloca`
struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    auto loc = alloc.getLoc();
    mlir::Type ity = lowerTy().indexType();
    unsigned i = 0;
    mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult();
    mlir::Type ty = convertType(alloc.getType());
    mlir::Type resultTy = ty;
    if (alloc.hasLenParams()) {
      unsigned end = alloc.numLenParams();
      llvm::SmallVector<mlir::Value> lenParams;
      for (; i < end; ++i)
        lenParams.push_back(operands[i]);
      mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType());
      if (auto chrTy = scalarType.dyn_cast<fir::CharacterType>()) {
        fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen(
            chrTy.getContext(), chrTy.getFKind());
        ty = mlir::LLVM::LLVMPointerType::get(convertType(rawCharTy));
        assert(end == 1);
        size = integerCast(loc, rewriter, ity, lenParams[0]);
      } else if (auto recTy = scalarType.dyn_cast<fir::RecordType>()) {
        mlir::LLVM::LLVMFuncOp memSizeFn =
            getDependentTypeMemSizeFn(recTy, alloc, rewriter);
        if (!memSizeFn)
          emitError(loc, "did not find allocation function");
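        // Length-parameterized derived type: call the type's generated
        // memory-size function with the LEN parameters; its result becomes
        // the allocation size in bytes.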
        mlir::NamedAttribute attr = rewriter.getNamedAttr(
            "callee", mlir::SymbolRefAttr::get(memSizeFn));
        auto call = rewriter.create<mlir::LLVM::CallOp>(
            loc, ity, lenParams, llvm::ArrayRef<mlir::NamedAttribute>{attr});
        size = call.getResult(0);
        ty = mlir::LLVM::LLVMPointerType::get(
            mlir::IntegerType::get(alloc.getContext(), 8));
      } else {
        return emitError(loc, "unexpected type ")
               << scalarType << " with type parameters";
      }
    }
    if (alloc.hasShapeOperands()) {
      mlir::Type allocEleTy = fir::unwrapRefType(alloc.getType());
      // Scale the size by constant factors encoded in the array type.
      // We only do this for arrays that don't have a constant interior, since
      // those are the only ones that get decayed to a pointer to the element
      // type.
      if (auto seqTy = allocEleTy.dyn_cast<fir::SequenceType>()) {
        if (!seqTy.hasConstantInterior()) {
          fir::SequenceType::Extent constSize = 1;
          for (auto extent : seqTy.getShape())
            if (extent != fir::SequenceType::getUnknownExtent())
              constSize *= extent;
          mlir::Value constVal{
              genConstantIndex(loc, ity, rewriter, constSize).getResult()};
          size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, constVal);
        }
      }
      unsigned end = operands.size();
      for (; i < end; ++i)
        size = rewriter.create<mlir::LLVM::MulOp>(
            loc, ity, size, integerCast(loc, rewriter, ity, operands[i]));
    }
    if (ty == resultTy) {
      // Do not emit the bitcast if ty and resultTy are the same.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AllocaOp>(alloc, ty, size,
                                                        alloc->getAttrs());
    } else {
      auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, ty, size,
                                                      alloc->getAttrs());
      rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(alloc, resultTy, al);
    }
    return mlir::success();
  }
};
} // namespace

/// Construct an `llvm.extractvalue` instruction. It will return the value at
/// element \p x from \p tuple.
static mlir::LLVM::ExtractValueOp
genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty,
                         mlir::ConversionPatternRewriter &rewriter,
                         mlir::MLIRContext *ctx, int x) {
  auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x));
  auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x];
  return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx);
}

namespace {
/// Lower `fir.box_addr` to the sequence of operations to extract the first
/// element of the box.
struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxaddr.getLoc();
    mlir::Type ty = convertType(boxaddr.getType());
    if (auto argty = boxaddr.getVal().getType().dyn_cast<fir::BoxType>()) {
      rewriter.replaceOp(boxaddr, loadBaseAddrFromBox(loc, ty, a, rewriter));
    } else {
      auto c0attr = rewriter.getI32IntegerAttr(0);
      auto c0 = mlir::ArrayAttr::get(boxaddr.getContext(), c0attr);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, ty, a,
                                                              c0);
    }
    return mlir::success();
  }
};

/// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the
/// boxchar.
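/// Illustrative result, assuming a boxchar lowered to a (buffer, length) pair
/// (a sketch; the payload pointer type depends on the character kind):
///   %len = llvm.extractvalue %boxchar[1] : !llvm.struct<(ptr<i8>, i64)>
/// followed by an integer cast of %len to the result type if the widths
/// differ.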
struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value boxChar = adaptor.getOperands()[0];
    mlir::Location loc = boxChar.getLoc();
    mlir::MLIRContext *ctx = boxChar.getContext();
    mlir::Type returnValTy = boxCharLen.getResult().getType();

    constexpr int boxcharLenIdx = 1;
    mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex(
        loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx);
    mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len);
    rewriter.replaceOp(boxCharLen, lenAfterCast);

    return mlir::success();
  }
};

/// Lower `fir.box_dims` to a sequence of operations to extract the requested
/// dimension information from the boxed value.
/// Results in a triple set of GEPs and loads.
struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    llvm::SmallVector<mlir::Type, 3> resultTypes = {
        convertType(boxdims.getResult(0).getType()),
        convertType(boxdims.getResult(1).getType()),
        convertType(boxdims.getResult(2).getType()),
    };
    auto results =
        getDimsFromBox(boxdims.getLoc(), resultTypes, adaptor.getOperands()[0],
                       adaptor.getOperands()[1], rewriter);
    rewriter.replaceOp(boxdims, results);
    return mlir::success();
  }
};

/// Lower `fir.box_elesize` to a sequence of operations to extract the size of
/// an element in the boxed value.
struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxelesz.getLoc();
    auto ty = convertType(boxelesz.getType());
    auto elemSize = getValueFromBox(loc, a, ty, rewriter, kElemLenPosInBox);
    rewriter.replaceOp(boxelesz, elemSize);
    return mlir::success();
  }
};

/// Lower `fir.box_isalloc` to a sequence of operations to determine if the
/// boxed value was from an ALLOCATABLE entity.
struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisalloc.getLoc();
    mlir::Value check =
        genBoxAttributeCheck(loc, box, rewriter, kAttrAllocatable);
    rewriter.replaceOp(boxisalloc, check);
    return mlir::success();
  }
};

/// Lower `fir.box_isarray` to a sequence of operations to determine if the
/// boxed value is an array.
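/// This is done by loading the rank field of the descriptor and testing
/// `rank != 0`.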
struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxisarray.getLoc();
    auto rank =
        getValueFromBox(loc, a, rewriter.getI32Type(), rewriter, kRankPosInBox);
    auto c0 = genConstantOffset(loc, rewriter, 0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
        boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0);
    return mlir::success();
  }
};

/// Lower `fir.box_isptr` to a sequence of operations to determine if the
/// boxed value was from a POINTER entity.
struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisptr.getLoc();
    mlir::Value check = genBoxAttributeCheck(loc, box, rewriter, kAttrPointer);
    rewriter.replaceOp(boxisptr, check);
    return mlir::success();
  }
};

/// Lower `fir.box_rank` to the sequence of operations to extract the rank from
/// the box.
struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxrank.getLoc();
    mlir::Type ty = convertType(boxrank.getType());
    auto result = getValueFromBox(loc, a, ty, rewriter, kRankPosInBox);
    rewriter.replaceOp(boxrank, result);
    return mlir::success();
  }
};

/// Lower `fir.boxproc_host` operation. Extracts the host pointer from the
/// boxproc.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(boxprochost.getLoc(), "fir.boxproc_host codegen");
    return mlir::failure();
  }
};

/// Lower `fir.box_tdesc` to the sequence of operations to extract the type
/// descriptor from the box.
struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxtypedesc.getLoc();
    mlir::Type typeTy =
        fir::getDescFieldTypeModel<kTypePosInBox>()(boxtypedesc.getContext());
    auto result = getValueFromBox(loc, box, typeTy, rewriter, kTypePosInBox);
    auto typePtrTy = mlir::LLVM::LLVMPointerType::get(typeTy);
    rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(boxtypedesc, typePtrTy,
                                                        result);
    return mlir::success();
  }
};

/// Lower `fir.string_lit` to LLVM IR dialect operation.
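/// A scalar StringAttr or DenseElementsAttr becomes a single
/// llvm.mlir.constant; an ArrayAttr of code points is materialized
/// element-by-element with llvm.insertvalue into an llvm.mlir.undef value.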
struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(constop.getType());
    auto attr = constop.getValue();
    if (attr.isa<mlir::StringAttr>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr);
      return mlir::success();
    }

    auto charTy = constop.getType().cast<fir::CharacterType>();
    unsigned bits = lowerTy().characterBitsize(charTy);
    mlir::Type intTy = rewriter.getIntegerType(bits);
    mlir::Location loc = constop.getLoc();
    mlir::Value cst = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    if (auto arr = attr.dyn_cast<mlir::DenseElementsAttr>()) {
      cst = rewriter.create<mlir::LLVM::ConstantOp>(loc, ty, arr);
    } else if (auto arr = attr.dyn_cast<mlir::ArrayAttr>()) {
      for (auto a : llvm::enumerate(arr.getValue())) {
        // Convert each character to a precise bitsize.
        auto elemAttr = mlir::IntegerAttr::get(
            intTy,
            a.value().cast<mlir::IntegerAttr>().getValue().zextOrTrunc(bits));
        auto elemCst =
            rewriter.create<mlir::LLVM::ConstantOp>(loc, intTy, elemAttr);
        auto index = mlir::ArrayAttr::get(
            constop.getContext(), rewriter.getI32IntegerAttr(a.index()));
        cst = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, cst, elemCst,
                                                         index);
      }
    } else {
      return mlir::failure();
    }
    rewriter.replaceOp(constop, cst);
    return mlir::success();
  }
};

// `fir.call` -> `llvm.call`
struct CallOpConversion : public FIROpConversion<fir::CallOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CallOp call, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    llvm::SmallVector<mlir::Type> resultTys;
    for (auto r : call.getResults())
      resultTys.push_back(convertType(r.getType()));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        call, resultTys, adaptor.getOperands(), call->getAttrs());
    return mlir::success();
  }
};
} // namespace

static mlir::Type getComplexEleTy(mlir::Type complex) {
  if (auto cc = complex.dyn_cast<mlir::ComplexType>())
    return cc.getElementType();
  return complex.cast<fir::ComplexType>().getElementType();
}

namespace {
/// Compare complex values
///
/// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une).
///
/// For completeness, all other comparisons are done on the real component
/// only.
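/// As a sketch, `a .EQ. b` becomes
///   (fcmp oeq re(a), re(b)) AND (fcmp oeq im(a), im(b))
/// and `.NE.` uses `une` with an OR of the two component compares.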
struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    mlir::MLIRContext *ctxt = cmp.getContext();
    mlir::Type eleTy = convertType(getComplexEleTy(cmp.getLhs().getType()));
    mlir::Type resTy = convertType(cmp.getType());
    mlir::Location loc = cmp.getLoc();
    auto pos0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
    llvm::SmallVector<mlir::Value, 2> rp{
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0],
                                                    pos0),
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1],
                                                    pos0)};
    auto rcp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, rp, cmp->getAttrs());
    auto pos1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
    llvm::SmallVector<mlir::Value, 2> ip{
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0],
                                                    pos1),
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1],
                                                    pos1)};
    auto icp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, ip, cmp->getAttrs());
    llvm::SmallVector<mlir::Value, 2> cp{rcp, icp};
    switch (cmp.getPredicate()) {
    case mlir::arith::CmpFPredicate::OEQ: // .EQ.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp);
      break;
    case mlir::arith::CmpFPredicate::UNE: // .NE.
      rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp);
      break;
    default:
      rewriter.replaceOp(cmp, rcp.getResult());
      break;
    }
    return mlir::success();
  }
};

/// Lower complex constants
struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::ConstcOp conc, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Location loc = conc.getLoc();
    mlir::MLIRContext *ctx = conc.getContext();
    mlir::Type ty = convertType(conc.getType());
    mlir::Type ety = convertType(getComplexEleTy(conc.getType()));
    auto realFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getReal()));
    auto realPart =
        rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, realFloatAttr);
    auto imFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getImaginary()));
    auto imPart =
        rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, imFloatAttr);
    auto realIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto imIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    auto undef = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto setReal = rewriter.create<mlir::LLVM::InsertValueOp>(
        loc, ty, undef, realPart, realIndex);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(conc, ty, setReal,
                                                           imPart, imIndex);
    return mlir::success();
  }

  inline llvm::APFloat getValue(mlir::Attribute attr) const {
    return attr.cast<fir::RealAttr>().getValue();
  }
};

/// Convert a value of from-type to a value of to-type.
struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> {
  using FIROpConversion::FIROpConversion;

  static bool isFloatingPointTy(mlir::Type ty) {
    return ty.isa<mlir::FloatType>();
  }

  mlir::LogicalResult
  matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto fromFirTy = convert.getValue().getType();
    auto toFirTy = convert.getRes().getType();
    auto fromTy = convertType(fromFirTy);
    auto toTy = convertType(toFirTy);
    mlir::Value op0 = adaptor.getOperands()[0];
    if (fromTy == toTy) {
      rewriter.replaceOp(convert, op0);
      return mlir::success();
    }
    auto loc = convert.getLoc();
    auto convertFpToFp = [&](mlir::Value val, unsigned fromBits,
                             unsigned toBits, mlir::Type toTy) -> mlir::Value {
      if (fromBits == toBits) {
        // TODO: Converting between two floating-point representations with the
        // same bitwidth is not allowed for now.
        mlir::emitError(loc,
                        "cannot implicitly convert between two floating-point "
                        "representations of the same bitwidth");
        return {};
      }
      if (fromBits > toBits)
        return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val);
      return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val);
    };
    // Complex to complex conversion.
    if (fir::isa_complex(fromFirTy) && fir::isa_complex(toFirTy)) {
      // Special case: handle the conversion of a complex such that both the
      // real and imaginary parts are converted together.
      auto zero = mlir::ArrayAttr::get(convert.getContext(),
                                       rewriter.getI32IntegerAttr(0));
      auto one = mlir::ArrayAttr::get(convert.getContext(),
                                      rewriter.getI32IntegerAttr(1));
      auto ty = convertType(getComplexEleTy(convert.getValue().getType()));
      auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, zero);
      auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, one);
      auto nt = convertType(getComplexEleTy(convert.getRes().getType()));
      auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
      auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt);
      auto rc = convertFpToFp(rp, fromBits, toBits, nt);
      auto ic = convertFpToFp(ip, fromBits, toBits, nt);
      auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy);
      auto i1 =
          rewriter.create<mlir::LLVM::InsertValueOp>(loc, toTy, un, rc, zero);
      rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, toTy, i1,
                                                             ic, one);
      return mlir::success();
    }

    // Follow UNIX F77 convention for logicals:
    // 1. underlying integer is not zero => logical is .TRUE.
    // 2. logical is .TRUE. => set underlying integer to 1.
    auto i1Type = mlir::IntegerType::get(convert.getContext(), 1);
    if (fromFirTy.isa<fir::LogicalType>() && toFirTy == i1Type) {
      mlir::Value zero = genConstantIndex(loc, fromTy, rewriter, 0);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
          convert, mlir::LLVM::ICmpPredicate::ne, op0, zero);
      return mlir::success();
    }
    if (fromFirTy == i1Type && toFirTy.isa<fir::LogicalType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ZExtOp>(convert, toTy, op0);
      return mlir::success();
    }

    // Floating point to floating point conversion.
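    // For example, f32 -> f64 lowers to llvm.fpext and f64 -> f32 to
    // llvm.fptrunc; same-width conversions are rejected in convertFpToFp
    // above.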
    if (isFloatingPointTy(fromTy)) {
      if (isFloatingPointTy(toTy)) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        auto v = convertFpToFp(op0, fromBits, toBits, toTy);
        rewriter.replaceOp(convert, v);
        return mlir::success();
      }
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::FPToSIOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::IntegerType>()) {
      // Integer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        assert(fromBits != toBits);
        if (fromBits > toBits) {
          rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0);
          return mlir::success();
        }
        rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to floating point conversion.
      if (isFloatingPointTy(toTy)) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::LLVM::LLVMPointerType>()) {
      // Pointer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Pointer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0);
        return mlir::success();
      }
    }
    return emitError(loc) << "cannot convert " << fromTy << " to " << toTy;
  }
};

/// Lower `fir.dispatch` operation. A virtual call to a method in a dispatch
/// table.
struct DispatchOpConversion : public FIROpConversion<fir::DispatchOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DispatchOp dispatch, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dispatch.getLoc(), "fir.dispatch codegen");
    return mlir::failure();
  }
};

/// Lower `fir.dispatch_table` operation. The dispatch table for a Fortran
/// derived type.
struct DispatchTableOpConversion
    : public FIROpConversion<fir::DispatchTableOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DispatchTableOp dispTab, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dispTab.getLoc(), "fir.dispatch_table codegen");
    return mlir::failure();
  }
};

/// Lower `fir.dt_entry` operation. An entry in a dispatch table; binds a
/// method-name to a function.
struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DTEntryOp dtEnt, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dtEnt.getLoc(), "fir.dt_entry codegen");
    return mlir::failure();
  }
};

/// Lower `fir.global_len` operation.
struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(globalLen.getLoc(), "fir.global_len codegen");
    return mlir::failure();
  }
};

/// Lower fir.len_param_index
struct LenParamIndexOpConversion
    : public FIROpConversion<fir::LenParamIndexOp> {
  using FIROpConversion::FIROpConversion;

  // FIXME: this should be specialized by the runtime target
  mlir::LogicalResult
  matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(lenp.getLoc(), "fir.len_param_index codegen");
  }
};

/// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of
/// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element
/// in this struct is a pointer. Its type is determined from `KIND`. The 2nd
/// element is the length of the character buffer (`#n`).
struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    auto *ctx = emboxChar.getContext();

    mlir::Value charBuffer = operands[0];
    mlir::Value charBufferLen = operands[1];

    mlir::Location loc = emboxChar.getLoc();
    mlir::Type llvmStructTy = convertType(emboxChar.getType());
    auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy);

    mlir::Type lenTy =
        llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1];
    mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen);

    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>(
        loc, llvmStructTy, llvmStruct, charBuffer, c0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
        emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1);

    return mlir::success();
  }
};
} // namespace

/// Return the LLVMFuncOp corresponding to the standard malloc call.
static mlir::LLVM::LLVMFuncOp
getMalloc(fir::AllocMemOp op, mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  if (mlir::LLVM::LLVMFuncOp mallocFunc =
          module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("malloc"))
    return mallocFunc;
  mlir::OpBuilder moduleBuilder(
      op->getParentOfType<mlir::ModuleOp>().getBodyRegion());
  auto indexType = mlir::IntegerType::get(op.getContext(), 64);
  return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
      rewriter.getUnknownLoc(), "malloc",
      mlir::LLVM::LLVMFunctionType::get(getVoidPtrType(op.getContext()),
                                        indexType,
                                        /*isVarArg=*/false));
}

/// Helper function for generating the LLVM IR that computes the size
/// in bytes for a derived type.
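/// This uses the classic "GEP off a null pointer" idiom, roughly:
///   %null = llvm.mlir.null : !llvm.ptr<T>
///   %gep  = llvm.getelementptr %null[1] : ... -> !llvm.ptr<T>
///   %size = llvm.ptrtoint %gep : !llvm.ptr<T> to i64
/// (an illustrative sketch; the pointer and index types come from the caller).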
static mlir::Value
computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy,
                       mlir::ConversionPatternRewriter &rewriter) {
  auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
  mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
  llvm::SmallVector<mlir::Value> args{one};
  auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args);
  return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep);
}

namespace {
/// Lower a `fir.allocmem` instruction into `llvm.call @malloc`
struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto heapTy = heap.getType();
    auto ty = convertType(heapTy);
    mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter);
    mlir::Location loc = heap.getLoc();
    auto ity = lowerTy().indexType();
    auto dataTy = fir::unwrapRefType(heapTy);
    if (fir::isRecordWithTypeParameters(fir::unwrapSequenceType(dataTy)))
      TODO(loc, "fir.allocmem codegen of derived type with length parameters");
    mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty);
    // !fir.array<NxMx!fir.char<K,?>> sets `size` to the width of !fir.char<K>.
    // So multiply the constant dimensions here.
    if (fir::hasDynamicSize(dataTy))
      if (auto seqTy = dataTy.dyn_cast<fir::SequenceType>())
        if (fir::characterWithDynamicLen(seqTy.getEleTy())) {
          fir::SequenceType::Extent arrSize = 1;
          for (auto d : seqTy.getShape())
            if (d != fir::SequenceType::getUnknownExtent())
              arrSize *= d;
          size = rewriter.create<mlir::LLVM::MulOp>(
              loc, ity, size, genConstantIndex(loc, ity, rewriter, arrSize));
        }
    for (mlir::Value opnd : adaptor.getOperands())
      size = rewriter.create<mlir::LLVM::MulOp>(
          loc, ity, size, integerCast(loc, rewriter, ity, opnd));
    heap->setAttr("callee", mlir::SymbolRefAttr::get(mallocFunc));
    auto malloc = rewriter.create<mlir::LLVM::CallOp>(
        loc, ::getVoidPtrType(heap.getContext()), size, heap->getAttrs());
    rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(heap, ty,
                                                       malloc.getResult(0));
    return mlir::success();
  }

  // Compute the (allocation) size of the allocmem type in bytes.
  mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy,
                                 mlir::ConversionPatternRewriter &rewriter,
                                 mlir::Type llTy) const {
    // Use the primitive size, if available.
    auto ptrTy = llTy.dyn_cast<mlir::LLVM::LLVMPointerType>();
    if (auto size =
            mlir::LLVM::getPrimitiveTypeSizeInBits(ptrTy.getElementType()))
      return genConstantIndex(loc, idxTy, rewriter, size / 8);

    // Otherwise, generate the GEP trick in LLVM IR to compute the size.
    return computeDerivedTypeSize(loc, ptrTy, idxTy, rewriter);
  }
};
} // namespace

/// Return the LLVMFuncOp corresponding to the standard free call.
static mlir::LLVM::LLVMFuncOp
getFree(fir::FreeMemOp op, mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  if (mlir::LLVM::LLVMFuncOp freeFunc =
          module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("free"))
    return freeFunc;
  mlir::OpBuilder moduleBuilder(module.getBodyRegion());
  auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext());
  return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
      rewriter.getUnknownLoc(), "free",
      mlir::LLVM::LLVMFunctionType::get(voidType,
                                        getVoidPtrType(op.getContext()),
                                        /*isVarArg=*/false));
}

namespace {
/// Lower a `fir.freemem` instruction into `llvm.call @free`
struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::LLVM::LLVMFuncOp freeFunc = getFree(freemem, rewriter);
    mlir::Location loc = freemem.getLoc();
    auto bitcast = rewriter.create<mlir::LLVM::BitcastOp>(
        freemem.getLoc(), voidPtrTy(), adaptor.getOperands()[0]);
    freemem->setAttr("callee", mlir::SymbolRefAttr::get(freeFunc));
    rewriter.create<mlir::LLVM::CallOp>(
        loc, mlir::TypeRange{}, mlir::ValueRange{bitcast}, freemem->getAttrs());
    rewriter.eraseOp(freemem);
    return mlir::success();
  }
};
} // namespace

namespace {} // namespace

/// Common base class for embox to descriptor conversion.
template <typename OP>
struct EmboxCommonConversion : public FIROpConversion<OP> {
  using FIROpConversion<OP>::FIROpConversion;

  // Find the LLVMFuncOp in whose entry block the alloca should be inserted.
  // The order to find the LLVMFuncOp is as follows:
  // 1. The parent operation of the current block if it is a LLVMFuncOp.
  // 2. The first ancestor that is a LLVMFuncOp.
  mlir::LLVM::LLVMFuncOp
  getFuncForAllocaInsert(mlir::ConversionPatternRewriter &rewriter) const {
    mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
    return mlir::isa<mlir::LLVM::LLVMFuncOp>(parentOp)
               ? mlir::cast<mlir::LLVM::LLVMFuncOp>(parentOp)
               : parentOp->getParentOfType<mlir::LLVM::LLVMFuncOp>();
  }

  // Generate an alloca of size 1 and type \p toTy.
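  // The alloca is emitted in the entry block of the enclosing LLVMFuncOp so
  // that repeated descriptor creation (e.g. inside loops) does not keep
  // growing the stack.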
  mlir::LLVM::AllocaOp
  genAllocaWithType(mlir::Location loc, mlir::Type toTy, unsigned alignment,
                    mlir::ConversionPatternRewriter &rewriter) const {
    auto thisPt = rewriter.saveInsertionPoint();
    mlir::LLVM::LLVMFuncOp func = getFuncForAllocaInsert(rewriter);
    rewriter.setInsertionPointToStart(&func.front());
    auto size = this->genI32Constant(loc, rewriter, 1);
    auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, toTy, size, alignment);
    rewriter.restoreInsertionPoint(thisPt);
    return al;
  }

  static int getCFIAttr(fir::BoxType boxTy) {
    auto eleTy = boxTy.getEleTy();
    if (eleTy.isa<fir::PointerType>())
      return CFI_attribute_pointer;
    if (eleTy.isa<fir::HeapType>())
      return CFI_attribute_allocatable;
    return CFI_attribute_other;
  }

  static fir::RecordType unwrapIfDerived(fir::BoxType boxTy) {
    return fir::unwrapSequenceType(fir::dyn_cast_ptrOrBoxEleTy(boxTy))
        .template dyn_cast<fir::RecordType>();
  }
  static bool isDerivedTypeWithLenParams(fir::BoxType boxTy) {
    auto recTy = unwrapIfDerived(boxTy);
    return recTy && recTy.getNumLenParams() > 0;
  }
  static bool isDerivedType(fir::BoxType boxTy) {
    return unwrapIfDerived(boxTy) != nullptr;
  }

  // Get the element size and CFI type code of the boxed value.
  std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode(
      mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
      mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const {
    auto doInteger =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::integerBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doLogical =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::logicalBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doFloat = [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::realBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doComplex =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      auto typeCode = fir::complexBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8 * 2),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doCharacter =
        [&](unsigned width,
            mlir::Value len) -> std::tuple<mlir::Value, mlir::Value> {
      auto typeCode = fir::characterBitsToTypeCode(width);
      auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode);
      if (width == 8)
        return {len, typeCodeVal};
      auto byteWidth = this->genConstantOffset(loc, rewriter, width / 8);
      auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64);
      auto size =
          rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len);
      return {size, typeCodeVal};
    };
    auto getKindMap = [&]() -> fir::KindMapping & {
      return this->lowerTy().getKindMap();
    };
    // Pointer-like types.
    if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy))
      boxEleTy = eleTy;
    // Integer types.
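    // For example, a 32-bit integer yields an element size of 4 bytes and the
    // CFI type code returned by fir::integerBitsToTypeCode(32).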
    if (fir::isa_integer(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::IntegerType>())
        return doInteger(ty.getWidth());
      auto ty = boxEleTy.cast<fir::IntegerType>();
      return doInteger(getKindMap().getIntegerBitsize(ty.getFKind()));
    }
    // Floating point types.
    if (fir::isa_real(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::FloatType>())
        return doFloat(ty.getWidth());
      auto ty = boxEleTy.cast<fir::RealType>();
      return doFloat(getKindMap().getRealBitsize(ty.getFKind()));
    }
    // Complex types.
    if (fir::isa_complex(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::ComplexType>())
        return doComplex(
            ty.getElementType().cast<mlir::FloatType>().getWidth());
      auto ty = boxEleTy.cast<fir::ComplexType>();
      return doComplex(getKindMap().getRealBitsize(ty.getFKind()));
    }
    // Character types.
    if (auto ty = boxEleTy.dyn_cast<fir::CharacterType>()) {
      auto charWidth = getKindMap().getCharacterBitsize(ty.getFKind());
      if (ty.getLen() != fir::CharacterType::unknownLen()) {
        auto len = this->genConstantOffset(loc, rewriter, ty.getLen());
        return doCharacter(charWidth, len);
      }
      assert(!lenParams.empty());
      return doCharacter(charWidth, lenParams.back());
    }
    // Logical type.
    if (auto ty = boxEleTy.dyn_cast<fir::LogicalType>())
      return doLogical(getKindMap().getLogicalBitsize(ty.getFKind()));
    // Array types.
    if (auto seqTy = boxEleTy.dyn_cast<fir::SequenceType>())
      return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams);
    // Derived-type types.
    if (boxEleTy.isa<fir::RecordType>()) {
      auto ptrTy = mlir::LLVM::LLVMPointerType::get(
          this->lowerTy().convertType(boxEleTy));
      auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
      auto one =
          genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1);
      auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr,
                                                    mlir::ValueRange{one});
      auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>(
          loc, this->lowerTy().indexType(), gep);
      return {eleSize,
              this->genConstantOffset(loc, rewriter, fir::derivedToTypeCode())};
    }
    // Reference type.
    if (fir::isa_ref_type(boxEleTy)) {
      // FIXME: use the target pointer size rather than sizeof(void*)
      return {this->genConstantOffset(loc, rewriter, sizeof(void *)),
              this->genConstantOffset(loc, rewriter, CFI_type_cptr)};
    }
    fir::emitFatalError(loc, "unhandled type in fir.box code generation");
  }

  /// Basic pattern to write a field in the descriptor
  mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter,
                          mlir::Location loc, mlir::Value dest,
                          llvm::ArrayRef<unsigned> fldIndexes,
                          mlir::Value value, bool bitcast = false) const {
    auto boxTy = dest.getType();
    auto fldTy = this->getBoxEleTy(boxTy, fldIndexes);
    if (bitcast)
      value = rewriter.create<mlir::LLVM::BitcastOp>(loc, fldTy, value);
    else
      value = this->integerCast(loc, rewriter, fldTy, value);
    llvm::SmallVector<mlir::Attribute, 2> attrs;
    for (auto i : fldIndexes)
      attrs.push_back(rewriter.getI32IntegerAttr(i));
    auto indexesAttr = mlir::ArrayAttr::get(rewriter.getContext(), attrs);
    return rewriter.create<mlir::LLVM::InsertValueOp>(loc, boxTy, dest, value,
                                                      indexesAttr);
  }

  inline mlir::Value
  insertBaseAddress(mlir::ConversionPatternRewriter &rewriter,
                    mlir::Location loc, mlir::Value dest,
                    mlir::Value base) const {
    return insertField(rewriter, loc, dest, {kAddrPosInBox}, base,
                       /*bitCast=*/true);
  }

  inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter,
                                      mlir::Location loc, mlir::Value dest,
                                      unsigned dim, mlir::Value lb) const {
    return insertField(rewriter, loc, dest,
                       {kDimsPosInBox, dim, kDimLowerBoundPos}, lb);
  }

  inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter,
                                  mlir::Location loc, mlir::Value dest,
                                  unsigned dim, mlir::Value extent) const {
    return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos},
                       extent);
  }

  inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter,
                                  mlir::Location loc, mlir::Value dest,
                                  unsigned dim, mlir::Value stride) const {
    return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos},
                       stride);
  }

  /// Get the address of the type descriptor global variable that was created
  /// by lowering for derived type \p recType.
  template <typename BOX>
  mlir::Value
  getTypeDescriptor(BOX box, mlir::ConversionPatternRewriter &rewriter,
                    mlir::Location loc, fir::RecordType recType) const {
    std::string name =
        fir::NameUniquer::getTypeDescriptorName(recType.getName());
    auto module = box->template getParentOfType<mlir::ModuleOp>();
    if (auto global = module.template lookupSymbol<fir::GlobalOp>(name)) {
      auto ty = mlir::LLVM::LLVMPointerType::get(
          this->lowerTy().convertType(global.getType()));
      return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
                                                      global.getSymName());
    }
    if (auto global =
            module.template lookupSymbol<mlir::LLVM::GlobalOp>(name)) {
      // The global may have already been translated to LLVM.
      auto ty = mlir::LLVM::LLVMPointerType::get(global.getType());
      return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
                                                      global.getSymName());
    }
    // Type info derived types do not have type descriptors since they are the
    // types defining type descriptors.
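    // In that case (or when ignoreMissingTypeDescriptors is set) a null
    // pointer is emitted in place of the type descriptor address.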
    if (!this->options.ignoreMissingTypeDescriptors &&
        !fir::NameUniquer::belongsToModule(
            name, Fortran::semantics::typeInfoBuiltinModule))
      fir::emitFatalError(
          loc, "runtime derived type info descriptor was not generated");
    return rewriter.create<mlir::LLVM::NullOp>(
        loc, ::getVoidPtrType(box.getContext()));
  }

  template <typename BOX>
  std::tuple<fir::BoxType, mlir::Value, mlir::Value>
  consDescriptorPrefix(BOX box, mlir::ConversionPatternRewriter &rewriter,
                       unsigned rank, mlir::ValueRange lenParams) const {
    auto loc = box.getLoc();
    auto boxTy = box.getType().template dyn_cast<fir::BoxType>();
    auto convTy = this->lowerTy().convertBoxType(boxTy, rank);
    auto llvmBoxPtrTy = convTy.template cast<mlir::LLVM::LLVMPointerType>();
    auto llvmBoxTy = llvmBoxPtrTy.getElementType();
    mlir::Value descriptor =
        rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy);

    llvm::SmallVector<mlir::Value> typeparams = lenParams;
    if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) {
      if (!box.substr().empty() && fir::hasDynamicSize(boxTy.getEleTy()))
        typeparams.push_back(box.substr()[1]);
    }

    // Write each of the fields with the appropriate values
    auto [eleSize, cfiTy] =
        getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams);
    descriptor =
        insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize);
    descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox},
                             this->genI32Constant(loc, rewriter, CFI_VERSION));
    descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox},
                             this->genI32Constant(loc, rewriter, rank));
    descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy);
    descriptor =
        insertField(rewriter, loc, descriptor, {kAttributePosInBox},
                    this->genI32Constant(loc, rewriter, getCFIAttr(boxTy)));
    const bool hasAddendum = isDerivedType(boxTy);
    descriptor =
        insertField(rewriter, loc, descriptor, {kF18AddendumPosInBox},
                    this->genI32Constant(loc, rewriter, hasAddendum ? 1 : 0));

    if (hasAddendum) {
      auto isArray =
          fir::dyn_cast_ptrOrBoxEleTy(boxTy).template isa<fir::SequenceType>();
      unsigned typeDescFieldId = isArray ? kOptTypePtrPosInBox : kDimsPosInBox;
      auto typeDesc =
          getTypeDescriptor(box, rewriter, loc, unwrapIfDerived(boxTy));
      descriptor =
          insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc,
                      /*bitCast=*/true);
    }

    return {boxTy, descriptor, eleSize};
  }

  /// Compute the base address of a substring given the base address of a
  /// scalar string and the zero based string lower bound.
  mlir::Value shiftSubstringBase(mlir::ConversionPatternRewriter &rewriter,
                                 mlir::Location loc, mlir::Value base,
                                 mlir::Value lowerBound) const {
    llvm::SmallVector<mlir::Value> gepOperands;
    auto baseType =
        base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType();
    if (baseType.isa<mlir::LLVM::LLVMArrayType>()) {
      auto idxTy = this->lowerTy().indexType();
      mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
      gepOperands.push_back(zero);
    }
    gepOperands.push_back(lowerBound);
    return this->genGEP(loc, base.getType(), rewriter, base, gepOperands);
  }

  /// If the embox is not in a globalOp body, allocate storage for the box;
  /// store the value inside and return the generated alloca.
Return the input
1390 /// value otherwise.
1391 mlir::Value
1392 placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter,
1393 mlir::Location loc, mlir::Value boxValue) const {
1394 auto *thisBlock = rewriter.getInsertionBlock();
1395 if (thisBlock && mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp()))
1396 return boxValue;
1397 auto boxPtrTy = mlir::LLVM::LLVMPointerType::get(boxValue.getType());
1398 auto alloca = genAllocaWithType(loc, boxPtrTy, defaultAlign, rewriter);
1399 rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, alloca);
1400 return alloca;
1401 }
1402 };
1403
1404 /// Compute the extent of a triplet slice (lb:ub:step).
1405 static mlir::Value
1406 computeTripletExtent(mlir::ConversionPatternRewriter &rewriter,
1407 mlir::Location loc, mlir::Value lb, mlir::Value ub,
1408 mlir::Value step, mlir::Value zero, mlir::Type type) {
1409 mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb);
1410 extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step);
1411 extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step);
1412 // If the resulting extent is negative (`ub-lb` and `step` have different
1413 // signs), zero must be returned instead.
1414 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
1415 loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero);
1416 return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero);
1417 }
1418
1419 /// Create a generic box on a memory reference. This conversion lowers the
1420 /// abstract box to the appropriate, initialized descriptor.
1421 struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> {
1422 using EmboxCommonConversion::EmboxCommonConversion;
1423
1424 mlir::LogicalResult
1425 matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor,
1426 mlir::ConversionPatternRewriter &rewriter) const override {
1427 assert(!embox.getShape() && "There should be no dims on this embox op");
1428 auto [boxTy, dest, eleSize] =
1429 consDescriptorPrefix(embox, rewriter, /*rank=*/0,
1430 /*lenParams=*/adaptor.getOperands().drop_front(1));
1431 dest = insertBaseAddress(rewriter, embox.getLoc(), dest,
1432 adaptor.getOperands()[0]);
1433 if (isDerivedTypeWithLenParams(boxTy)) {
1434 TODO(embox.getLoc(),
1435 "fir.embox codegen of derived with length parameters");
1436 return mlir::failure();
1437 }
1438 auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest);
1439 rewriter.replaceOp(embox, result);
1440 return mlir::success();
1441 }
1442 };
1443
1444 /// Create a generic box on a memory reference.
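/// This extended form (fir::cg::XEmboxOp) also fills the `dims` part of the
/// descriptor from the shape, shift, and slice operands. As an illustrative
/// sketch only (operand details elided, not verbatim IR), an embox of a
/// dynamically shaped array such as
///   %box = fircg.ext_embox %addr(%n) : (!fir.ref<!fir.array<?xi32>>, index)
///            -> !fir.box<!fir.array<?xi32>>
/// becomes a chain of llvm.insertvalue operations that populate the descriptor
/// struct {base_addr, elem_len, version, rank, type, attribute, f18Addendum,
/// dims[0] = {lower_bound, extent, stride}}, followed by a store into a stack
/// slot unless the embox appears inside a global initializer.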
1445 struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> { 1446 using EmboxCommonConversion::EmboxCommonConversion; 1447 1448 mlir::LogicalResult 1449 matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor, 1450 mlir::ConversionPatternRewriter &rewriter) const override { 1451 auto [boxTy, dest, eleSize] = consDescriptorPrefix( 1452 xbox, rewriter, xbox.getOutRank(), 1453 adaptor.getOperands().drop_front(xbox.lenParamOffset())); 1454 // Generate the triples in the dims field of the descriptor 1455 mlir::ValueRange operands = adaptor.getOperands(); 1456 auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64); 1457 mlir::Value base = operands[0]; 1458 assert(!xbox.shape().empty() && "must have a shape"); 1459 unsigned shapeOffset = xbox.shapeOffset(); 1460 bool hasShift = !xbox.shift().empty(); 1461 unsigned shiftOffset = xbox.shiftOffset(); 1462 bool hasSlice = !xbox.slice().empty(); 1463 unsigned sliceOffset = xbox.sliceOffset(); 1464 mlir::Location loc = xbox.getLoc(); 1465 mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0); 1466 mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1); 1467 mlir::Value prevDim = integerCast(loc, rewriter, i64Ty, eleSize); 1468 mlir::Value prevPtrOff = one; 1469 mlir::Type eleTy = boxTy.getEleTy(); 1470 const unsigned rank = xbox.getRank(); 1471 llvm::SmallVector<mlir::Value> gepArgs; 1472 unsigned constRows = 0; 1473 mlir::Value ptrOffset = zero; 1474 if (auto memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType())) 1475 if (auto seqTy = memEleTy.dyn_cast<fir::SequenceType>()) { 1476 mlir::Type seqEleTy = seqTy.getEleTy(); 1477 // Adjust the element scaling factor if the element is a dependent type. 1478 if (fir::hasDynamicSize(seqEleTy)) { 1479 if (fir::isa_char(seqEleTy)) { 1480 assert(xbox.lenParams().size() == 1); 1481 prevPtrOff = integerCast(loc, rewriter, i64Ty, 1482 operands[xbox.lenParamOffset()]); 1483 } else if (seqEleTy.isa<fir::RecordType>()) { 1484 TODO(loc, "generate call to calculate size of PDT"); 1485 } else { 1486 return rewriter.notifyMatchFailure(xbox, "unexpected dynamic type"); 1487 } 1488 } else { 1489 constRows = seqTy.getConstantRows(); 1490 } 1491 } 1492 1493 bool hasSubcomp = !xbox.subcomponent().empty(); 1494 if (!xbox.substr().empty()) 1495 TODO(loc, "codegen of fir.embox with substring"); 1496 1497 mlir::Value stepExpr; 1498 if (hasSubcomp) { 1499 // We have a subcomponent. The step value needs to be the number of 1500 // bytes per element (which is a derived type). 1501 mlir::Type ty0 = base.getType(); 1502 [[maybe_unused]] auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 1503 assert(ptrTy && "expected pointer type"); 1504 mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()); 1505 assert(memEleTy && "expected fir pointer type"); 1506 auto seqTy = memEleTy.dyn_cast<fir::SequenceType>(); 1507 assert(seqTy && "expected sequence type"); 1508 mlir::Type seqEleTy = seqTy.getEleTy(); 1509 auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy)); 1510 stepExpr = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter); 1511 } 1512 1513 // Process the array subspace arguments (shape, shift, etc.), if any, 1514 // translating everything to values in the descriptor wherever the entity 1515 // has a dynamic array dimension. 
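// In outline (a descriptive sketch of the loop below, not a specification;
// the names are illustrative only):
//   lower_bound(d) = 0 for a normalized BIND(C)-style box; otherwise 1, or the
//                    shift value when a shift is given without a slice;
//   extent(d)      = the shape extent, or max((ub - lb + step) / step, 0) for
//                    a triplet slice;
//   stride(d)      = the element size in bytes times the product of the
//                    extents of the previous dimensions, scaled by the slice
//                    step when a slice is present.
// For subcomponent references, the per-element byte size of the derived type
// is used as the base step instead.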
1516 for (unsigned di = 0, descIdx = 0; di < rank; ++di) {
1517 mlir::Value extent = operands[shapeOffset];
1518 mlir::Value outerExtent = extent;
1519 bool skipNext = false;
1520 if (hasSlice) {
1521 mlir::Value off = operands[sliceOffset];
1522 mlir::Value adj = one;
1523 if (hasShift)
1524 adj = operands[shiftOffset];
1525 auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj);
1526 if (constRows > 0) {
1527 gepArgs.push_back(ao);
1528 } else {
1529 auto dimOff =
1530 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff);
1531 ptrOffset =
1532 rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset);
1533 }
1534 if (mlir::isa_and_nonnull<fir::UndefOp>(
1535 xbox.slice()[3 * di + 1].getDefiningOp())) {
1536 // This dimension contains a scalar expression in the array slice op.
1537 // The dimension is loop invariant, will be dropped, and will not
1538 // appear in the descriptor.
1539 skipNext = true;
1540 }
1541 }
1542 if (!skipNext) {
1543 if (hasSlice)
1544 extent = computeTripletExtent(rewriter, loc, operands[sliceOffset],
1545 operands[sliceOffset + 1],
1546 operands[sliceOffset + 2], zero, i64Ty);
1547 // Store the lower bound (normally 0) for BIND(C) interoperability.
1548 mlir::Value lb = zero;
1549 const bool isaPointerOrAllocatable =
1550 eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>();
1551 // The lower bound defaults to 1 for POINTER, ALLOCATABLE, and
1552 // denormalized descriptors.
1553 if (isaPointerOrAllocatable || !normalizedLowerBound(xbox)) {
1554 lb = one;
1555 // If there is a shifted origin, and no fir.slice, and this is not
1556 // a normalized descriptor, then use the value from the shift op as
1557 // the lower bound.
1558 if (hasShift && !(hasSlice || hasSubcomp)) {
1559 lb = operands[shiftOffset];
1560 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>(
1561 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero);
1562 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one,
1563 lb);
1564 }
1565 }
1566 dest = insertLowerBound(rewriter, loc, dest, descIdx, lb);
1567
1568 dest = insertExtent(rewriter, loc, dest, descIdx, extent);
1569
1570 // Store the step (scaled by the shaped extent).
1571
1572 mlir::Value step = hasSubcomp ? stepExpr : prevDim;
1573 if (hasSlice)
1574 step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step,
1575 operands[sliceOffset + 2]);
1576 dest = insertStride(rewriter, loc, dest, descIdx, step);
1577 ++descIdx;
1578 }
1579
1580 // Compute the stride and offset for the next natural dimension.
1581 prevDim =
1582 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevDim, outerExtent);
1583 if (constRows == 0)
1584 prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff,
1585 outerExtent);
1586 else
1587 --constRows;
1588
1589 // Increment the iterators.
1590 ++shapeOffset;
1591 if (hasShift)
1592 ++shiftOffset;
1593 if (hasSlice)
1594 sliceOffset += 3;
1595 }
1596 if (hasSlice || hasSubcomp || !xbox.substr().empty()) {
1597 llvm::SmallVector<mlir::Value> args = {ptrOffset};
1598 args.append(gepArgs.rbegin(), gepArgs.rend());
1599 if (hasSubcomp) {
1600 // For each field in the path, add the offset to base via the args list.
1601 // In the most general case, some offsets must be computed since
1602 // they are not known until runtime.
1603 if (fir::hasDynamicSize(fir::unwrapSequenceType( 1604 fir::unwrapPassByRefType(xbox.memref().getType())))) 1605 TODO(loc, "fir.embox codegen dynamic size component in derived type"); 1606 args.append(operands.begin() + xbox.subcomponentOffset(), 1607 operands.begin() + xbox.subcomponentOffset() + 1608 xbox.subcomponent().size()); 1609 } 1610 base = 1611 rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args); 1612 if (!xbox.substr().empty()) 1613 base = shiftSubstringBase(rewriter, loc, base, 1614 operands[xbox.substrOffset()]); 1615 } 1616 dest = insertBaseAddress(rewriter, loc, dest, base); 1617 if (isDerivedTypeWithLenParams(boxTy)) 1618 TODO(loc, "fir.embox codegen of derived with length parameters"); 1619 1620 mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest); 1621 rewriter.replaceOp(xbox, result); 1622 return mlir::success(); 1623 } 1624 1625 /// Return true if `xbox` has a normalized lower bounds attribute. A box value 1626 /// that is neither a POINTER nor an ALLOCATABLE should be normalized to a 1627 /// zero origin lower bound for interoperability with BIND(C). 1628 inline static bool normalizedLowerBound(fir::cg::XEmboxOp xbox) { 1629 return xbox->hasAttr(fir::getNormalizedLowerBoundAttrName()); 1630 } 1631 }; 1632 1633 /// Create a new box given a box reference. 1634 struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> { 1635 using EmboxCommonConversion::EmboxCommonConversion; 1636 1637 mlir::LogicalResult 1638 matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor, 1639 mlir::ConversionPatternRewriter &rewriter) const override { 1640 mlir::Location loc = rebox.getLoc(); 1641 mlir::Type idxTy = lowerTy().indexType(); 1642 mlir::Value loweredBox = adaptor.getOperands()[0]; 1643 mlir::ValueRange operands = adaptor.getOperands(); 1644 1645 // Create new descriptor and fill its non-shape related data. 
1646 llvm::SmallVector<mlir::Value, 2> lenParams; 1647 mlir::Type inputEleTy = getInputEleTy(rebox); 1648 if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) { 1649 mlir::Value len = 1650 loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter); 1651 if (charTy.getFKind() != 1) { 1652 mlir::Value width = 1653 genConstantIndex(loc, idxTy, rewriter, charTy.getFKind()); 1654 len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width); 1655 } 1656 lenParams.emplace_back(len); 1657 } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) { 1658 if (recTy.getNumLenParams() != 0) 1659 TODO(loc, "reboxing descriptor of derived type with length parameters"); 1660 } 1661 auto [boxTy, dest, eleSize] = 1662 consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams); 1663 1664 // Read input extents, strides, and base address 1665 llvm::SmallVector<mlir::Value> inputExtents; 1666 llvm::SmallVector<mlir::Value> inputStrides; 1667 const unsigned inputRank = rebox.getRank(); 1668 for (unsigned i = 0; i < inputRank; ++i) { 1669 mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i); 1670 llvm::SmallVector<mlir::Value, 3> dimInfo = 1671 getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter); 1672 inputExtents.emplace_back(dimInfo[1]); 1673 inputStrides.emplace_back(dimInfo[2]); 1674 } 1675 1676 mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType()); 1677 mlir::Value baseAddr = 1678 loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter); 1679 1680 if (!rebox.slice().empty() || !rebox.subcomponent().empty()) 1681 return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1682 operands, rewriter); 1683 return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1684 operands, rewriter); 1685 } 1686 1687 private: 1688 /// Write resulting shape and base address in descriptor, and replace rebox 1689 /// op. 1690 mlir::LogicalResult 1691 finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1692 mlir::ValueRange lbounds, mlir::ValueRange extents, 1693 mlir::ValueRange strides, 1694 mlir::ConversionPatternRewriter &rewriter) const { 1695 mlir::Location loc = rebox.getLoc(); 1696 mlir::Value zero = 1697 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 1698 mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1); 1699 for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) { 1700 mlir::Value extent = std::get<0>(iter.value()); 1701 unsigned dim = iter.index(); 1702 mlir::Value lb = one; 1703 if (!lbounds.empty()) { 1704 lb = lbounds[dim]; 1705 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1706 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1707 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, lb); 1708 }; 1709 dest = insertLowerBound(rewriter, loc, dest, dim, lb); 1710 dest = insertExtent(rewriter, loc, dest, dim, extent); 1711 dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value())); 1712 } 1713 dest = insertBaseAddress(rewriter, loc, dest, base); 1714 mlir::Value result = 1715 placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest); 1716 rewriter.replaceOp(rebox, result); 1717 return mlir::success(); 1718 } 1719 1720 // Apply slice given the base address, extents and strides of the input box. 
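// A rough sketch of the computation done below for each sliced dimension d
// (the names are descriptive, not actual variables in this file):
//   base      += (slice_lb(d) - origin(d)) * input_stride(d)   // in bytes
//   extent(d)  = max((ub(d) - slice_lb(d) + step(d)) / step(d), 0)
//   stride(d)  = step(d) * input_stride(d)
// Dimensions addressed by a scalar index (no triplet) only contribute to the
// base address shift and are dropped from the resulting descriptor.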
1721 mlir::LogicalResult 1722 sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1723 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1724 mlir::ValueRange operands, 1725 mlir::ConversionPatternRewriter &rewriter) const { 1726 mlir::Location loc = rebox.getLoc(); 1727 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1728 mlir::Type idxTy = lowerTy().indexType(); 1729 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 1730 // Apply subcomponent and substring shift on base address. 1731 if (!rebox.subcomponent().empty() || !rebox.substr().empty()) { 1732 // Cast to inputEleTy* so that a GEP can be used. 1733 mlir::Type inputEleTy = getInputEleTy(rebox); 1734 auto llvmElePtrTy = 1735 mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy)); 1736 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base); 1737 1738 if (!rebox.subcomponent().empty()) { 1739 llvm::SmallVector<mlir::Value> gepOperands = {zero}; 1740 for (unsigned i = 0; i < rebox.subcomponent().size(); ++i) 1741 gepOperands.push_back(operands[rebox.subcomponentOffset() + i]); 1742 base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands); 1743 } 1744 if (!rebox.substr().empty()) 1745 base = shiftSubstringBase(rewriter, loc, base, 1746 operands[rebox.substrOffset()]); 1747 } 1748 1749 if (rebox.slice().empty()) 1750 // The array section is of the form array[%component][substring], keep 1751 // the input array extents and strides. 1752 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1753 inputExtents, inputStrides, rewriter); 1754 1755 // Strides from the fir.box are in bytes. 1756 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1757 1758 // The slice is of the form array(i:j:k)[%component]. Compute new extents 1759 // and strides. 1760 llvm::SmallVector<mlir::Value> slicedExtents; 1761 llvm::SmallVector<mlir::Value> slicedStrides; 1762 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 1763 const bool sliceHasOrigins = !rebox.shift().empty(); 1764 unsigned sliceOps = rebox.sliceOffset(); 1765 unsigned shiftOps = rebox.shiftOffset(); 1766 auto strideOps = inputStrides.begin(); 1767 const unsigned inputRank = inputStrides.size(); 1768 for (unsigned i = 0; i < inputRank; 1769 ++i, ++strideOps, ++shiftOps, sliceOps += 3) { 1770 mlir::Value sliceLb = 1771 integerCast(loc, rewriter, idxTy, operands[sliceOps]); 1772 mlir::Value inputStride = *strideOps; // already idxTy 1773 // Apply origin shift: base += (lb-shift)*input_stride 1774 mlir::Value sliceOrigin = 1775 sliceHasOrigins 1776 ? integerCast(loc, rewriter, idxTy, operands[shiftOps]) 1777 : one; 1778 mlir::Value diff = 1779 rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin); 1780 mlir::Value offset = 1781 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride); 1782 base = genGEP(loc, voidPtrTy, rewriter, base, offset); 1783 // Apply upper bound and step if this is a triplet. Otherwise, the 1784 // dimension is dropped and no extents/strides are computed. 
1785 mlir::Value upper = operands[sliceOps + 1]; 1786 const bool isTripletSlice = 1787 !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp()); 1788 if (isTripletSlice) { 1789 mlir::Value step = 1790 integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]); 1791 // extent = ub-lb+step/step 1792 mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper); 1793 mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb, 1794 sliceUb, step, zero, idxTy); 1795 slicedExtents.emplace_back(extent); 1796 // stride = step*input_stride 1797 mlir::Value stride = 1798 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride); 1799 slicedStrides.emplace_back(stride); 1800 } 1801 } 1802 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1803 slicedExtents, slicedStrides, rewriter); 1804 } 1805 1806 /// Apply a new shape to the data described by a box given the base address, 1807 /// extents and strides of the box. 1808 mlir::LogicalResult 1809 reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1810 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1811 mlir::ValueRange operands, 1812 mlir::ConversionPatternRewriter &rewriter) const { 1813 mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(), 1814 operands.begin() + rebox.shiftOffset() + 1815 rebox.shift().size()}; 1816 if (rebox.shape().empty()) { 1817 // Only setting new lower bounds. 1818 return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents, 1819 inputStrides, rewriter); 1820 } 1821 1822 mlir::Location loc = rebox.getLoc(); 1823 // Strides from the fir.box are in bytes. 1824 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1825 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1826 1827 llvm::SmallVector<mlir::Value> newStrides; 1828 llvm::SmallVector<mlir::Value> newExtents; 1829 mlir::Type idxTy = lowerTy().indexType(); 1830 // First stride from input box is kept. The rest is assumed contiguous 1831 // (it is not possible to reshape otherwise). If the input is scalar, 1832 // which may be OK if all new extents are ones, the stride does not 1833 // matter, use one. 1834 mlir::Value stride = inputStrides.empty() 1835 ? genConstantIndex(loc, idxTy, rewriter, 1) 1836 : inputStrides[0]; 1837 for (unsigned i = 0; i < rebox.shape().size(); ++i) { 1838 mlir::Value rawExtent = operands[rebox.shapeOffset() + i]; 1839 mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent); 1840 newExtents.emplace_back(extent); 1841 newStrides.emplace_back(stride); 1842 // nextStride = extent * stride; 1843 stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride); 1844 } 1845 return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides, 1846 rewriter); 1847 } 1848 1849 /// Return scalar element type of the input box. 1850 static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) { 1851 auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType()); 1852 if (auto seqTy = ty.dyn_cast<fir::SequenceType>()) 1853 return seqTy.getEleTy(); 1854 return ty; 1855 } 1856 }; 1857 1858 /// Lower `fir.emboxproc` operation. Creates a procedure box. 1859 /// TODO: Part of supporting Fortran 2003 procedure pointers. 
1860 struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> { 1861 using FIROpConversion::FIROpConversion; 1862 1863 mlir::LogicalResult 1864 matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor, 1865 mlir::ConversionPatternRewriter &rewriter) const override { 1866 TODO(emboxproc.getLoc(), "fir.emboxproc codegen"); 1867 return mlir::failure(); 1868 } 1869 }; 1870 1871 // Code shared between insert_value and extract_value Ops. 1872 struct ValueOpCommon { 1873 // Translate the arguments pertaining to any multidimensional array to 1874 // row-major order for LLVM-IR. 1875 static void toRowMajor(llvm::SmallVectorImpl<mlir::Attribute> &attrs, 1876 mlir::Type ty) { 1877 assert(ty && "type is null"); 1878 const auto end = attrs.size(); 1879 for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) { 1880 if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 1881 const auto dim = getDimension(seq); 1882 if (dim > 1) { 1883 auto ub = std::min(i + dim, end); 1884 std::reverse(attrs.begin() + i, attrs.begin() + ub); 1885 i += dim - 1; 1886 } 1887 ty = getArrayElementType(seq); 1888 } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) { 1889 ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()]; 1890 } else { 1891 llvm_unreachable("index into invalid type"); 1892 } 1893 } 1894 } 1895 1896 static llvm::SmallVector<mlir::Attribute> 1897 collectIndices(mlir::ConversionPatternRewriter &rewriter, 1898 mlir::ArrayAttr arrAttr) { 1899 llvm::SmallVector<mlir::Attribute> attrs; 1900 for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) { 1901 if (i->isa<mlir::IntegerAttr>()) { 1902 attrs.push_back(*i); 1903 } else { 1904 auto fieldName = i->cast<mlir::StringAttr>().getValue(); 1905 ++i; 1906 auto ty = i->cast<mlir::TypeAttr>().getValue(); 1907 auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName); 1908 attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index)); 1909 } 1910 } 1911 return attrs; 1912 } 1913 1914 private: 1915 static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) { 1916 unsigned result = 1; 1917 for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>(); 1918 eleTy; 1919 eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>()) 1920 ++result; 1921 return result; 1922 } 1923 1924 static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) { 1925 auto eleTy = ty.getElementType(); 1926 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 1927 eleTy = arrTy.getElementType(); 1928 return eleTy; 1929 } 1930 }; 1931 1932 namespace { 1933 /// Extract a subobject value from an ssa-value of aggregate type 1934 struct ExtractValueOpConversion 1935 : public FIROpAndTypeConversion<fir::ExtractValueOp>, 1936 public ValueOpCommon { 1937 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1938 1939 mlir::LogicalResult 1940 doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor, 1941 mlir::ConversionPatternRewriter &rewriter) const override { 1942 auto attrs = collectIndices(rewriter, extractVal.getCoor()); 1943 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 1944 auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs); 1945 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>( 1946 extractVal, ty, adaptor.getOperands()[0], position); 1947 return mlir::success(); 1948 } 1949 }; 1950 1951 /// InsertValue is the generalized instruction for the composition of new 1952 /// aggregate type values. 
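/// As a rough example (syntax abbreviated and not guaranteed to be verbatim
/// IR), an insertion into a statically shaped array value such as
///   %v = fir.insert_value %arr, %x, [1 : index]
///          : (!fir.array<2xi32>, i32) -> !fir.array<2xi32>
/// maps to something like
///   %v = llvm.insertvalue %x, %arr[1] : !llvm.array<2 x i32>
/// with record field names resolved to their member index and multidimensional
/// array coordinates reversed into row-major order by toRowMajor above.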
1953 struct InsertValueOpConversion 1954 : public FIROpAndTypeConversion<fir::InsertValueOp>, 1955 public ValueOpCommon { 1956 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1957 1958 mlir::LogicalResult 1959 doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor, 1960 mlir::ConversionPatternRewriter &rewriter) const override { 1961 auto attrs = collectIndices(rewriter, insertVal.getCoor()); 1962 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 1963 auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs); 1964 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 1965 insertVal, ty, adaptor.getOperands()[0], adaptor.getOperands()[1], 1966 position); 1967 return mlir::success(); 1968 } 1969 }; 1970 1971 /// InsertOnRange inserts a value into a sequence over a range of offsets. 1972 struct InsertOnRangeOpConversion 1973 : public FIROpAndTypeConversion<fir::InsertOnRangeOp> { 1974 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1975 1976 // Increments an array of subscripts in a row major fasion. 1977 void incrementSubscripts(const llvm::SmallVector<uint64_t> &dims, 1978 llvm::SmallVector<uint64_t> &subscripts) const { 1979 for (size_t i = dims.size(); i > 0; --i) { 1980 if (++subscripts[i - 1] < dims[i - 1]) { 1981 return; 1982 } 1983 subscripts[i - 1] = 0; 1984 } 1985 } 1986 1987 mlir::LogicalResult 1988 doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor, 1989 mlir::ConversionPatternRewriter &rewriter) const override { 1990 1991 llvm::SmallVector<uint64_t> dims; 1992 auto type = adaptor.getOperands()[0].getType(); 1993 1994 // Iteratively extract the array dimensions from the type. 1995 while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 1996 dims.push_back(t.getNumElements()); 1997 type = t.getElementType(); 1998 } 1999 2000 llvm::SmallVector<uint64_t> lBounds; 2001 llvm::SmallVector<uint64_t> uBounds; 2002 2003 // Unzip the upper and lower bound and convert to a row major format. 2004 mlir::DenseIntElementsAttr coor = range.getCoor(); 2005 auto reversedCoor = llvm::reverse(coor.getValues<int64_t>()); 2006 for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) { 2007 uBounds.push_back(*i++); 2008 lBounds.push_back(*i); 2009 } 2010 2011 auto &subscripts = lBounds; 2012 auto loc = range.getLoc(); 2013 mlir::Value lastOp = adaptor.getOperands()[0]; 2014 mlir::Value insertVal = adaptor.getOperands()[1]; 2015 2016 auto i64Ty = rewriter.getI64Type(); 2017 while (subscripts != uBounds) { 2018 // Convert uint64_t's to Attribute's. 2019 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2020 for (const auto &subscript : subscripts) 2021 subscriptAttrs.push_back(mlir::IntegerAttr::get(i64Ty, subscript)); 2022 lastOp = rewriter.create<mlir::LLVM::InsertValueOp>( 2023 loc, ty, lastOp, insertVal, 2024 mlir::ArrayAttr::get(range.getContext(), subscriptAttrs)); 2025 2026 incrementSubscripts(dims, subscripts); 2027 } 2028 2029 // Convert uint64_t's to Attribute's. 
2030 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2031 for (const auto &subscript : subscripts) 2032 subscriptAttrs.push_back( 2033 mlir::IntegerAttr::get(rewriter.getI64Type(), subscript)); 2034 mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs); 2035 2036 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 2037 range, ty, lastOp, insertVal, 2038 mlir::ArrayAttr::get(range.getContext(), arrayRef)); 2039 2040 return mlir::success(); 2041 } 2042 }; 2043 } // namespace 2044 2045 namespace { 2046 /// XArrayCoor is the address arithmetic on a dynamically shaped, sliced, 2047 /// shifted etc. array. 2048 /// (See the static restriction on coordinate_of.) array_coor determines the 2049 /// coordinate (location) of a specific element. 2050 struct XArrayCoorOpConversion 2051 : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> { 2052 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2053 2054 mlir::LogicalResult 2055 doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor, 2056 mlir::ConversionPatternRewriter &rewriter) const override { 2057 auto loc = coor.getLoc(); 2058 mlir::ValueRange operands = adaptor.getOperands(); 2059 unsigned rank = coor.getRank(); 2060 assert(coor.indices().size() == rank); 2061 assert(coor.shape().empty() || coor.shape().size() == rank); 2062 assert(coor.shift().empty() || coor.shift().size() == rank); 2063 assert(coor.slice().empty() || coor.slice().size() == 3 * rank); 2064 mlir::Type idxTy = lowerTy().indexType(); 2065 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2066 mlir::Value prevExt = one; 2067 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2068 mlir::Value offset = zero; 2069 const bool isShifted = !coor.shift().empty(); 2070 const bool isSliced = !coor.slice().empty(); 2071 const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>(); 2072 2073 auto indexOps = coor.indices().begin(); 2074 auto shapeOps = coor.shape().begin(); 2075 auto shiftOps = coor.shift().begin(); 2076 auto sliceOps = coor.slice().begin(); 2077 // For each dimension of the array, generate the offset calculation. 2078 for (unsigned i = 0; i < rank; 2079 ++i, ++indexOps, ++shapeOps, ++shiftOps, sliceOps += 3) { 2080 mlir::Value index = 2081 integerCast(loc, rewriter, idxTy, operands[coor.indicesOffset() + i]); 2082 mlir::Value lb = isShifted ? integerCast(loc, rewriter, idxTy, 2083 operands[coor.shiftOffset() + i]) 2084 : one; 2085 mlir::Value step = one; 2086 bool normalSlice = isSliced; 2087 // Compute zero based index in dimension i of the element, applying 2088 // potential triplets and lower bounds. 2089 if (isSliced) { 2090 mlir::Value ub = *(sliceOps + 1); 2091 normalSlice = !mlir::isa_and_nonnull<fir::UndefOp>(ub.getDefiningOp()); 2092 if (normalSlice) 2093 step = integerCast(loc, rewriter, idxTy, *(sliceOps + 2)); 2094 } 2095 auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb); 2096 mlir::Value diff = 2097 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step); 2098 if (normalSlice) { 2099 mlir::Value sliceLb = 2100 integerCast(loc, rewriter, idxTy, operands[coor.sliceOffset() + i]); 2101 auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb); 2102 diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj); 2103 } 2104 // Update the offset given the stride and the zero based index `diff` 2105 // that was just computed. 2106 if (baseIsBoxed) { 2107 // Use stride in bytes from the descriptor. 
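// (Descriptive sketch of the accumulation performed in this branch, with
//  illustrative names: offset += diff(i) * byte_stride(i) for each dimension
//  i, where diff(i) is the zero-based, slice-adjusted index computed above and
//  byte_stride(i) is read from the descriptor.)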
2108 mlir::Value stride = 2109 loadStrideFromBox(loc, adaptor.getOperands()[0], i, rewriter); 2110 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride); 2111 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2112 } else { 2113 // Use stride computed at last iteration. 2114 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt); 2115 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2116 // Compute next stride assuming contiguity of the base array 2117 // (in element number). 2118 auto nextExt = 2119 integerCast(loc, rewriter, idxTy, operands[coor.shapeOffset() + i]); 2120 prevExt = 2121 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt); 2122 } 2123 } 2124 2125 // Add computed offset to the base address. 2126 if (baseIsBoxed) { 2127 // Working with byte offsets. The base address is read from the fir.box. 2128 // and need to be casted to i8* to do the pointer arithmetic. 2129 mlir::Type baseTy = 2130 getBaseAddrTypeFromBox(adaptor.getOperands()[0].getType()); 2131 mlir::Value base = 2132 loadBaseAddrFromBox(loc, baseTy, adaptor.getOperands()[0], rewriter); 2133 mlir::Type voidPtrTy = getVoidPtrType(); 2134 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2135 llvm::SmallVector<mlir::Value> args{offset}; 2136 auto addr = 2137 rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args); 2138 if (coor.subcomponent().empty()) { 2139 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, baseTy, addr); 2140 return mlir::success(); 2141 } 2142 auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr); 2143 args.clear(); 2144 args.push_back(zero); 2145 if (!coor.lenParams().empty()) { 2146 // If type parameters are present, then we don't want to use a GEPOp 2147 // as below, as the LLVM struct type cannot be statically defined. 2148 TODO(loc, "derived type with type parameters"); 2149 } 2150 // TODO: array offset subcomponents must be converted to LLVM's 2151 // row-major layout here. 2152 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2153 args.push_back(operands[i]); 2154 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, baseTy, casted, 2155 args); 2156 return mlir::success(); 2157 } 2158 2159 // The array was not boxed, so it must be contiguous. offset is therefore an 2160 // element offset and the base type is kept in the GEP unless the element 2161 // type size is itself dynamic. 2162 mlir::Value base; 2163 if (coor.subcomponent().empty()) { 2164 // No subcomponent. 2165 if (!coor.lenParams().empty()) { 2166 // Type parameters. Adjust element size explicitly. 2167 auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType()); 2168 assert(eleTy && "result must be a reference-like type"); 2169 if (fir::characterWithDynamicLen(eleTy)) { 2170 assert(coor.lenParams().size() == 1); 2171 auto bitsInChar = lowerTy().getKindMap().getCharacterBitsize( 2172 eleTy.cast<fir::CharacterType>().getFKind()); 2173 auto scaling = genConstantIndex(loc, idxTy, rewriter, bitsInChar / 8); 2174 auto scaledBySize = 2175 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, scaling); 2176 auto length = 2177 integerCast(loc, rewriter, idxTy, 2178 adaptor.getOperands()[coor.lenParamsOffset()]); 2179 offset = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, scaledBySize, 2180 length); 2181 } else { 2182 TODO(loc, "compute size of derived type with type parameters"); 2183 } 2184 } 2185 // Cast the base address to a pointer to T. 
2186 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty, 2187 adaptor.getOperands()[0]); 2188 } else { 2189 // Operand #0 must have a pointer type. For subcomponent slicing, we 2190 // want to cast away the array type and have a plain struct type. 2191 mlir::Type ty0 = adaptor.getOperands()[0].getType(); 2192 auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 2193 assert(ptrTy && "expected pointer type"); 2194 mlir::Type eleTy = ptrTy.getElementType(); 2195 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2196 eleTy = arrTy.getElementType(); 2197 auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy); 2198 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy, 2199 adaptor.getOperands()[0]); 2200 } 2201 llvm::SmallVector<mlir::Value> args = {offset}; 2202 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2203 args.push_back(operands[i]); 2204 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args); 2205 return mlir::success(); 2206 } 2207 }; 2208 } // namespace 2209 2210 /// Convert to (memory) reference to a reference to a subobject. 2211 /// The coordinate_of op is a Swiss army knife operation that can be used on 2212 /// (memory) references to records, arrays, complex, etc. as well as boxes. 2213 /// With unboxed arrays, there is the restriction that the array have a static 2214 /// shape in all but the last column. 2215 struct CoordinateOpConversion 2216 : public FIROpAndTypeConversion<fir::CoordinateOp> { 2217 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2218 2219 mlir::LogicalResult 2220 doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor, 2221 mlir::ConversionPatternRewriter &rewriter) const override { 2222 mlir::ValueRange operands = adaptor.getOperands(); 2223 2224 mlir::Location loc = coor.getLoc(); 2225 mlir::Value base = operands[0]; 2226 mlir::Type baseObjectTy = coor.getBaseType(); 2227 mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2228 assert(objectTy && "fir.coordinate_of expects a reference type"); 2229 2230 // Complex type - basically, extract the real or imaginary part 2231 if (fir::isa_complex(objectTy)) { 2232 mlir::LLVM::ConstantOp c0 = 2233 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2234 llvm::SmallVector<mlir::Value> offs = {c0, operands[1]}; 2235 mlir::Value gep = genGEP(loc, ty, rewriter, base, offs); 2236 rewriter.replaceOp(coor, gep); 2237 return mlir::success(); 2238 } 2239 2240 // Boxed type - get the base pointer from the box 2241 if (baseObjectTy.dyn_cast<fir::BoxType>()) 2242 return doRewriteBox(coor, ty, operands, loc, rewriter); 2243 2244 // Reference, pointer or a heap type 2245 if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType, fir::HeapType>()) 2246 return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter); 2247 2248 return rewriter.notifyMatchFailure( 2249 coor, "fir.coordinate_of base operand has unsupported type"); 2250 } 2251 2252 static unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) { 2253 return fir::hasDynamicSize(ty) 2254 ? 
op.getDefiningOp() 2255 ->getAttrOfType<mlir::IntegerAttr>("field") 2256 .getInt() 2257 : getIntValue(op); 2258 } 2259 2260 static int64_t getIntValue(mlir::Value val) { 2261 assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value"); 2262 mlir::Operation *defop = val.getDefiningOp(); 2263 2264 if (auto constOp = mlir::dyn_cast<mlir::arith::ConstantIntOp>(defop)) 2265 return constOp.value(); 2266 if (auto llConstOp = mlir::dyn_cast<mlir::LLVM::ConstantOp>(defop)) 2267 if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>()) 2268 return attr.getValue().getSExtValue(); 2269 fir::emitFatalError(val.getLoc(), "must be a constant"); 2270 } 2271 2272 static bool hasSubDimensions(mlir::Type type) { 2273 return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>(); 2274 } 2275 2276 /// Check whether this form of `!fir.coordinate_of` is supported. These 2277 /// additional checks are required, because we are not yet able to convert 2278 /// all valid forms of `!fir.coordinate_of`. 2279 /// TODO: Either implement the unsupported cases or extend the verifier 2280 /// in FIROps.cpp instead. 2281 static bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) { 2282 const std::size_t numOfCoors = coors.size(); 2283 std::size_t i = 0; 2284 bool subEle = false; 2285 bool ptrEle = false; 2286 for (; i < numOfCoors; ++i) { 2287 mlir::Value nxtOpnd = coors[i]; 2288 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2289 subEle = true; 2290 i += arrTy.getDimension() - 1; 2291 type = arrTy.getEleTy(); 2292 } else if (auto recTy = type.dyn_cast<fir::RecordType>()) { 2293 subEle = true; 2294 type = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2295 } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) { 2296 subEle = true; 2297 type = tupTy.getType(getIntValue(nxtOpnd)); 2298 } else { 2299 ptrEle = true; 2300 } 2301 } 2302 if (ptrEle) 2303 return (!subEle) && (numOfCoors == 1); 2304 return subEle && (i >= numOfCoors); 2305 } 2306 2307 /// Walk the abstract memory layout and determine if the path traverses any 2308 /// array types with unknown shape. Return true iff all the array types have a 2309 /// constant shape along the path. 2310 static bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) { 2311 for (std::size_t i = 0, sz = coors.size(); i < sz; ++i) { 2312 mlir::Value nxtOpnd = coors[i]; 2313 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2314 if (fir::sequenceWithNonConstantShape(arrTy)) 2315 return false; 2316 i += arrTy.getDimension() - 1; 2317 type = arrTy.getEleTy(); 2318 } else if (auto strTy = type.dyn_cast<fir::RecordType>()) { 2319 type = strTy.getType(getFieldNumber(strTy, nxtOpnd)); 2320 } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) { 2321 type = strTy.getType(getIntValue(nxtOpnd)); 2322 } else { 2323 return true; 2324 } 2325 } 2326 return true; 2327 } 2328 2329 private: 2330 mlir::LogicalResult 2331 doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands, 2332 mlir::Location loc, 2333 mlir::ConversionPatternRewriter &rewriter) const { 2334 mlir::Type boxObjTy = coor.getBaseType(); 2335 assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`"); 2336 2337 mlir::Value boxBaseAddr = operands[0]; 2338 2339 // 1. SPECIAL CASE (uses `fir.len_param_index`): 2340 // %box = ... 
: !fir.box<!fir.type<derived{len1:i32}>> 2341 // %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}> 2342 // %addr = coordinate_of %box, %lenp 2343 if (coor.getNumOperands() == 2) { 2344 mlir::Operation *coordinateDef = 2345 (*coor.getCoor().begin()).getDefiningOp(); 2346 if (mlir::isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef)) 2347 TODO(loc, 2348 "fir.coordinate_of - fir.len_param_index is not supported yet"); 2349 } 2350 2351 // 2. GENERAL CASE: 2352 // 2.1. (`fir.array`) 2353 // %box = ... : !fix.box<!fir.array<?xU>> 2354 // %idx = ... : index 2355 // %resultAddr = coordinate_of %box, %idx : !fir.ref<U> 2356 // 2.2 (`fir.derived`) 2357 // %box = ... : !fix.box<!fir.type<derived_type{field_1:i32}>> 2358 // %idx = ... : i32 2359 // %resultAddr = coordinate_of %box, %idx : !fir.ref<i32> 2360 // 2.3 (`fir.derived` inside `fir.array`) 2361 // %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32, 2362 // field_2:f32}>>> %idx1 = ... : index %idx2 = ... : i32 %resultAddr = 2363 // coordinate_of %box, %idx1, %idx2 : !fir.ref<f32> 2364 // 2.4. TODO: Either document or disable any other case that the following 2365 // implementation might convert. 2366 mlir::LLVM::ConstantOp c0 = 2367 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2368 mlir::Value resultAddr = 2369 loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()), 2370 boxBaseAddr, rewriter); 2371 // Component Type 2372 auto cpnTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy); 2373 mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext()); 2374 2375 for (unsigned i = 1, last = operands.size(); i < last; ++i) { 2376 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2377 if (i != 1) 2378 TODO(loc, "fir.array nested inside other array and/or derived type"); 2379 // Applies byte strides from the box. Ignore lower bound from box 2380 // since fir.coordinate_of indexes are zero based. Lowering takes care 2381 // of lower bound aspects. This both accounts for dynamically sized 2382 // types and non contiguous arrays. 
2383 auto idxTy = lowerTy().indexType(); 2384 mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0); 2385 for (unsigned index = i, lastIndex = i + arrTy.getDimension(); 2386 index < lastIndex; ++index) { 2387 mlir::Value stride = 2388 loadStrideFromBox(loc, operands[0], index - i, rewriter); 2389 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, 2390 operands[index], stride); 2391 off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off); 2392 } 2393 auto voidPtrBase = 2394 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr); 2395 llvm::SmallVector<mlir::Value> args{off}; 2396 resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, 2397 voidPtrBase, args); 2398 i += arrTy.getDimension() - 1; 2399 cpnTy = arrTy.getEleTy(); 2400 } else if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) { 2401 auto recRefTy = 2402 mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy)); 2403 mlir::Value nxtOpnd = operands[i]; 2404 auto memObj = 2405 rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr); 2406 llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd}; 2407 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2408 auto llvmCurrentObjTy = lowerTy().convertType(cpnTy); 2409 auto gep = rewriter.create<mlir::LLVM::GEPOp>( 2410 loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj, 2411 args); 2412 resultAddr = 2413 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep); 2414 } else { 2415 fir::emitFatalError(loc, "unexpected type in coordinate_of"); 2416 } 2417 } 2418 2419 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr); 2420 return mlir::success(); 2421 } 2422 2423 mlir::LogicalResult 2424 doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty, 2425 mlir::ValueRange operands, mlir::Location loc, 2426 mlir::ConversionPatternRewriter &rewriter) const { 2427 mlir::Type baseObjectTy = coor.getBaseType(); 2428 2429 // Component Type 2430 mlir::Type cpnTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2431 bool hasSubdimension = hasSubDimensions(cpnTy); 2432 bool columnIsDeferred = !hasSubdimension; 2433 2434 if (!supportedCoordinate(cpnTy, operands.drop_front(1))) 2435 TODO(loc, "unsupported combination of coordinate operands"); 2436 2437 const bool hasKnownShape = 2438 arraysHaveKnownShape(cpnTy, operands.drop_front(1)); 2439 2440 // If only the column is `?`, then we can simply place the column value in 2441 // the 0-th GEP position. 
2442 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2443 if (!hasKnownShape) { 2444 const unsigned sz = arrTy.getDimension(); 2445 if (arraysHaveKnownShape(arrTy.getEleTy(), 2446 operands.drop_front(1 + sz))) { 2447 fir::SequenceType::ShapeRef shape = arrTy.getShape(); 2448 bool allConst = true; 2449 for (unsigned i = 0; i < sz - 1; ++i) { 2450 if (shape[i] < 0) { 2451 allConst = false; 2452 break; 2453 } 2454 } 2455 if (allConst) 2456 columnIsDeferred = true; 2457 } 2458 } 2459 } 2460 2461 if (fir::hasDynamicSize(fir::unwrapSequenceType(cpnTy))) 2462 return mlir::emitError( 2463 loc, "fir.coordinate_of with a dynamic element size is unsupported"); 2464 2465 if (hasKnownShape || columnIsDeferred) { 2466 llvm::SmallVector<mlir::Value> offs; 2467 if (hasKnownShape && hasSubdimension) { 2468 mlir::LLVM::ConstantOp c0 = 2469 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2470 offs.push_back(c0); 2471 } 2472 llvm::Optional<int> dims; 2473 llvm::SmallVector<mlir::Value> arrIdx; 2474 for (std::size_t i = 1, sz = operands.size(); i < sz; ++i) { 2475 mlir::Value nxtOpnd = operands[i]; 2476 2477 if (!cpnTy) 2478 return mlir::emitError(loc, "invalid coordinate/check failed"); 2479 2480 // check if the i-th coordinate relates to an array 2481 if (dims.hasValue()) { 2482 arrIdx.push_back(nxtOpnd); 2483 int dimsLeft = *dims; 2484 if (dimsLeft > 1) { 2485 dims = dimsLeft - 1; 2486 continue; 2487 } 2488 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2489 // append array range in reverse (FIR arrays are column-major) 2490 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2491 arrIdx.clear(); 2492 dims.reset(); 2493 continue; 2494 } 2495 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2496 int d = arrTy.getDimension() - 1; 2497 if (d > 0) { 2498 dims = d; 2499 arrIdx.push_back(nxtOpnd); 2500 continue; 2501 } 2502 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2503 offs.push_back(nxtOpnd); 2504 continue; 2505 } 2506 2507 // check if the i-th coordinate relates to a field 2508 if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) 2509 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2510 else if (auto tupTy = cpnTy.dyn_cast<mlir::TupleType>()) 2511 cpnTy = tupTy.getType(getIntValue(nxtOpnd)); 2512 else 2513 cpnTy = nullptr; 2514 2515 offs.push_back(nxtOpnd); 2516 } 2517 if (dims.hasValue()) 2518 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2519 mlir::Value base = operands[0]; 2520 mlir::Value retval = genGEP(loc, ty, rewriter, base, offs); 2521 rewriter.replaceOp(coor, retval); 2522 return mlir::success(); 2523 } 2524 2525 return mlir::emitError( 2526 loc, "fir.coordinate_of base operand has unsupported type"); 2527 } 2528 }; 2529 2530 /// Convert `fir.field_index`. The conversion depends on whether the size of 2531 /// the record is static or dynamic. 2532 struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> { 2533 using FIROpConversion::FIROpConversion; 2534 2535 // NB: most field references should be resolved by this point 2536 mlir::LogicalResult 2537 matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor, 2538 mlir::ConversionPatternRewriter &rewriter) const override { 2539 auto recTy = field.getOnType().cast<fir::RecordType>(); 2540 unsigned index = recTy.getFieldIndex(field.getFieldId()); 2541 2542 if (!fir::hasDynamicSize(recTy)) { 2543 // Derived type has compile-time constant layout. Return index of the 2544 // component type in the parent type (to be used in GEP). 
2545 rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset(
2546 field.getLoc(), rewriter, index)});
2547 return mlir::success();
2548 }
2549
2550 // Derived type does not have a compile-time constant layout. Call the
2551 // compiler-generated function to determine the byte offset of the field at
2552 // runtime. This returns a non-constant value.
2553 mlir::FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get(
2554 field.getContext(), getOffsetMethodName(recTy, field.getFieldId()));
2555 mlir::NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr);
2556 mlir::NamedAttribute fieldAttr = rewriter.getNamedAttr(
2557 "field", mlir::IntegerAttr::get(lowerTy().indexType(), index));
2558 rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
2559 field, lowerTy().offsetType(), adaptor.getOperands(),
2560 llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr});
2561 return mlir::success();
2562 }
2563
2564 // Reconstruct the name of the compiler-generated method that calculates the
2565 // field offset.
2566 inline static std::string getOffsetMethodName(fir::RecordType recTy,
2567 llvm::StringRef field) {
2568 return recTy.getName().str() + "P." + field.str() + ".offset";
2569 }
2570 };
2571
2572 /// Convert `fir.end`
2573 struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> {
2574 using FIROpConversion::FIROpConversion;
2575
2576 mlir::LogicalResult
2577 matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor,
2578 mlir::ConversionPatternRewriter &rewriter) const override {
2579 TODO(firEnd.getLoc(), "fir.end codegen");
2580 return mlir::failure();
2581 }
2582 };
2583
2584 /// Lower `fir.gentypedesc` to a global constant.
2585 struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> {
2586 using FIROpConversion::FIROpConversion;
2587
2588 mlir::LogicalResult
2589 matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor,
2590 mlir::ConversionPatternRewriter &rewriter) const override {
2591 TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen");
2592 return mlir::failure();
2593 }
2594 };
2595
2596 /// Lower the `fir.has_value` operation to the `llvm.return` operation.
2597 struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> {
2598 using FIROpConversion::FIROpConversion;
2599
2600 mlir::LogicalResult
2601 matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor,
2602 mlir::ConversionPatternRewriter &rewriter) const override {
2603 rewriter.replaceOpWithNewOp<mlir::LLVM::ReturnOp>(op,
2604 adaptor.getOperands());
2605 return mlir::success();
2606 }
2607 };
2608
2609 /// Lower the `fir.global` operation to the `llvm.mlir.global` operation.
2610 /// `fir.insert_on_range` operations are replaced with a dense constant
2611 /// attribute when they cover the full range.
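/// As an illustrative sketch only (syntax abbreviated, not verbatim IR):
///   fir.global @data : !fir.array<4xi32> {
///     %c = arith.constant 1 : i32
///     %0 = fir.undefined !fir.array<4xi32>
///     %1 = fir.insert_on_range %0, %c from (0) to (3) : ...
///     fir.has_value %1 : !fir.array<4xi32>
///   }
/// becomes an llvm.mlir.global whose initializer yields a dense<1> constant,
/// because the insert_on_range covers the whole array; partial-range
/// insertions are left to InsertOnRangeOpConversion instead.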
2612 struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> { 2613 using FIROpConversion::FIROpConversion; 2614 2615 mlir::LogicalResult 2616 matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor, 2617 mlir::ConversionPatternRewriter &rewriter) const override { 2618 auto tyAttr = convertType(global.getType()); 2619 if (global.getType().isa<fir::BoxType>()) 2620 tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType(); 2621 auto loc = global.getLoc(); 2622 mlir::Attribute initAttr{}; 2623 if (global.getInitVal()) 2624 initAttr = global.getInitVal().getValue(); 2625 auto linkage = convertLinkage(global.getLinkName()); 2626 auto isConst = global.getConstant().hasValue(); 2627 auto g = rewriter.create<mlir::LLVM::GlobalOp>( 2628 loc, tyAttr, isConst, linkage, global.getSymName(), initAttr); 2629 auto &gr = g.getInitializerRegion(); 2630 rewriter.inlineRegionBefore(global.getRegion(), gr, gr.end()); 2631 if (!gr.empty()) { 2632 // Replace insert_on_range with a constant dense attribute if the 2633 // initialization is on the full range. 2634 auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>(); 2635 for (auto insertOp : insertOnRangeOps) { 2636 if (isFullRange(insertOp.getCoor(), insertOp.getType())) { 2637 auto seqTyAttr = convertType(insertOp.getType()); 2638 auto *op = insertOp.getVal().getDefiningOp(); 2639 auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op); 2640 if (!constant) { 2641 auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op); 2642 if (!convertOp) 2643 continue; 2644 constant = mlir::cast<mlir::arith::ConstantOp>( 2645 convertOp.getValue().getDefiningOp()); 2646 } 2647 mlir::Type vecType = mlir::VectorType::get( 2648 insertOp.getType().getShape(), constant.getType()); 2649 auto denseAttr = mlir::DenseElementsAttr::get( 2650 vecType.cast<mlir::ShapedType>(), constant.getValue()); 2651 rewriter.setInsertionPointAfter(insertOp); 2652 rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>( 2653 insertOp, seqTyAttr, denseAttr); 2654 } 2655 } 2656 } 2657 rewriter.eraseOp(global); 2658 return mlir::success(); 2659 } 2660 2661 bool isFullRange(mlir::DenseIntElementsAttr indexes, 2662 fir::SequenceType seqTy) const { 2663 auto extents = seqTy.getShape(); 2664 if (indexes.size() / 2 != static_cast<int64_t>(extents.size())) 2665 return false; 2666 auto cur_index = indexes.value_begin<int64_t>(); 2667 for (unsigned i = 0; i < indexes.size(); i += 2) { 2668 if (*(cur_index++) != 0) 2669 return false; 2670 if (*(cur_index++) != extents[i / 2] - 1) 2671 return false; 2672 } 2673 return true; 2674 } 2675 2676 // TODO: String comparaison should be avoided. Replace linkName with an 2677 // enumeration. 
2678 mlir::LLVM::Linkage 2679 convertLinkage(llvm::Optional<llvm::StringRef> optLinkage) const { 2680 if (optLinkage.hasValue()) { 2681 auto name = optLinkage.getValue(); 2682 if (name == "internal") 2683 return mlir::LLVM::Linkage::Internal; 2684 if (name == "linkonce") 2685 return mlir::LLVM::Linkage::Linkonce; 2686 if (name == "linkonce_odr") 2687 return mlir::LLVM::Linkage::LinkonceODR; 2688 if (name == "common") 2689 return mlir::LLVM::Linkage::Common; 2690 if (name == "weak") 2691 return mlir::LLVM::Linkage::Weak; 2692 } 2693 return mlir::LLVM::Linkage::External; 2694 } 2695 }; 2696 2697 /// `fir.load` --> `llvm.load` 2698 struct LoadOpConversion : public FIROpConversion<fir::LoadOp> { 2699 using FIROpConversion::FIROpConversion; 2700 2701 mlir::LogicalResult 2702 matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor, 2703 mlir::ConversionPatternRewriter &rewriter) const override { 2704 // fir.box is a special case because it is considered as an ssa values in 2705 // fir, but it is lowered as a pointer to a descriptor. So fir.ref<fir.box> 2706 // and fir.box end up being the same llvm types and loading a 2707 // fir.ref<fir.box> is actually a no op in LLVM. 2708 if (load.getType().isa<fir::BoxType>()) { 2709 rewriter.replaceOp(load, adaptor.getOperands()[0]); 2710 } else { 2711 rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>( 2712 load, convertType(load.getType()), adaptor.getOperands(), 2713 load->getAttrs()); 2714 } 2715 return mlir::success(); 2716 } 2717 }; 2718 2719 /// Lower `fir.no_reassoc` to LLVM IR dialect. 2720 /// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast 2721 /// math flags? 2722 struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> { 2723 using FIROpConversion::FIROpConversion; 2724 2725 mlir::LogicalResult 2726 matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor, 2727 mlir::ConversionPatternRewriter &rewriter) const override { 2728 rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]); 2729 return mlir::success(); 2730 } 2731 }; 2732 2733 static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest, 2734 llvm::Optional<mlir::ValueRange> destOps, 2735 mlir::ConversionPatternRewriter &rewriter, 2736 mlir::Block *newBlock) { 2737 if (destOps.hasValue()) 2738 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(), 2739 newBlock, mlir::ValueRange()); 2740 else 2741 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock); 2742 } 2743 2744 template <typename A, typename B> 2745 static void genBrOp(A caseOp, mlir::Block *dest, llvm::Optional<B> destOps, 2746 mlir::ConversionPatternRewriter &rewriter) { 2747 if (destOps.hasValue()) 2748 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(), 2749 dest); 2750 else 2751 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest); 2752 } 2753 2754 static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp, 2755 mlir::Block *dest, 2756 llvm::Optional<mlir::ValueRange> destOps, 2757 mlir::ConversionPatternRewriter &rewriter) { 2758 auto *thisBlock = rewriter.getInsertionBlock(); 2759 auto *newBlock = createBlock(rewriter, dest); 2760 rewriter.setInsertionPointToEnd(thisBlock); 2761 genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock); 2762 rewriter.setInsertionPointToEnd(newBlock); 2763 } 2764 2765 /// Conversion of `fir.select_case` 2766 /// 2767 /// The `fir.select_case` operation is converted to a if-then-else ladder. 
/// Conversion of `fir.select_case`
///
/// The `fir.select_case` operation is converted to an if-then-else ladder.
/// Depending on the case condition type, one or several comparisons and
/// conditional branches are generated.
///
/// A point value case such as `case(4)`, a lower bound case such as
/// `case(5:)`, or an upper bound case such as `case(:3)` is converted to a
/// simple comparison between the selector value and the constant value in the
/// case. The block associated with the case condition is then executed if
/// the comparison succeeds; otherwise, control branches to the next block with
/// the comparison for the next case condition.
///
/// A closed interval case condition such as `case(7:10)` is converted with a
/// first comparison and conditional branch for the lower bound. If
/// successful, control branches to a second block with the comparison for the
/// upper bound of the same case condition.
///
/// TODO: lowering of CHARACTER type cases is not handled yet.
struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    unsigned conds = caseOp.getNumConditions();
    llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue();
    // Type can be CHARACTER, INTEGER, or LOGICAL (C1145)
    auto ty = caseOp.getSelector().getType();
    if (ty.isa<fir::CharacterType>()) {
      TODO(caseOp.getLoc(), "fir.select_case codegen with character type");
      return mlir::failure();
    }
    mlir::Value selector = caseOp.getSelector(adaptor.getOperands());
    auto loc = caseOp.getLoc();
    for (unsigned t = 0; t != conds; ++t) {
      mlir::Block *dest = caseOp.getSuccessor(t);
      llvm::Optional<mlir::ValueRange> destOps =
          caseOp.getSuccessorOperands(adaptor.getOperands(), t);
      llvm::Optional<mlir::ValueRange> cmpOps =
          *caseOp.getCompareOperands(adaptor.getOperands(), t);
      mlir::Value caseArg = *(cmpOps.getValue().begin());
      mlir::Attribute attr = cases[t];
      if (attr.isa<fir::PointIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::LowerBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::UpperBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::ClosedIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        auto *thisBlock = rewriter.getInsertionBlock();
        auto *newBlock1 = createBlock(rewriter, dest);
        auto *newBlock2 = createBlock(rewriter, dest);
        rewriter.setInsertionPointToEnd(thisBlock);
        rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock1);
        mlir::Value caseArg0 = *(cmpOps.getValue().begin() + 1);
        auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0);
        genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock2);
        continue;
      }
      assert(attr.isa<mlir::UnitAttr>());
      assert((t + 1 == conds) && "unit must be last");
      genBrOp(caseOp, dest, destOps, rewriter);
    }
    return mlir::success();
  }
};

template <typename OP>
static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select,
                                  typename OP::Adaptor adaptor,
                                  mlir::ConversionPatternRewriter &rewriter) {
  unsigned conds = select.getNumConditions();
  auto cases = select.getCases().getValue();
  mlir::Value selector = adaptor.getSelector();
  auto loc = select.getLoc();
  assert(conds > 0 && "select must have cases");

  llvm::SmallVector<mlir::Block *> destinations;
  llvm::SmallVector<mlir::ValueRange> destinationsOperands;
  mlir::Block *defaultDestination;
  mlir::ValueRange defaultOperands;
  llvm::SmallVector<int32_t> caseValues;

  for (unsigned t = 0; t != conds; ++t) {
    mlir::Block *dest = select.getSuccessor(t);
    auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t);
    const mlir::Attribute &attr = cases[t];
    if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) {
      destinations.push_back(dest);
      destinationsOperands.push_back(destOps.hasValue() ? *destOps
                                                        : mlir::ValueRange{});
      caseValues.push_back(intAttr.getInt());
      continue;
    }
    assert(attr.template dyn_cast_or_null<mlir::UnitAttr>());
    assert((t + 1 == conds) && "unit must be last");
    defaultDestination = dest;
    defaultOperands = destOps.hasValue() ? *destOps : mlir::ValueRange{};
  }

  // LLVM::SwitchOp takes an i32 type for the selector.
  if (select.getSelector().getType() != rewriter.getI32Type())
    selector = rewriter.create<mlir::LLVM::TruncOp>(loc, rewriter.getI32Type(),
                                                    selector);

  rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>(
      select, selector,
      /*defaultDestination=*/defaultDestination,
      /*defaultOperands=*/defaultOperands,
      /*caseValues=*/caseValues,
      /*caseDestinations=*/destinations,
      /*caseOperands=*/destinationsOperands,
      /*branchWeights=*/llvm::ArrayRef<std::int32_t>());
}

/// Conversion of `fir::SelectOp` to `llvm.switch`
struct SelectOpConversion : public FIROpConversion<fir::SelectOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter);
    return mlir::success();
  }
};

/// Conversion of `fir::SelectRankOp` to `llvm.switch`
struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter);
    return mlir::success();
  }
};

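// Illustrative sketch of the rewrite performed by selectMatchAndRewrite above
// (SSA names, block names, and exact MLIR syntax are approximate):
//
//   fir.select %sel : i32 [1, ^bb1(%a : i32), 2, ^bb2, unit, ^bbDefault]
//     -->
//   llvm.switch %sel : i32, ^bbDefault [
//     1: ^bb1(%a : i32),
//     2: ^bb2
//   ]
//
// A selector that is not already i32 is first narrowed with llvm.trunc, since
// the llvm.switch here is built with i32 case values.
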
/// Lower `fir.select_type` to LLVM IR dialect.
struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::emitError(select.getLoc(),
                    "fir.select_type should have already been converted");
    return mlir::failure();
  }
};

/// `fir.store` --> `llvm.store`
struct StoreOpConversion : public FIROpConversion<fir::StoreOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    if (store.getValue().getType().isa<fir::BoxType>()) {
      // A fir.box value is actually in memory; load it first before storing it.
      mlir::Location loc = store.getLoc();
      mlir::Type boxPtrTy = adaptor.getOperands()[0].getType();
      auto val = rewriter.create<mlir::LLVM::LoadOp>(
          loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(),
          adaptor.getOperands()[0]);
      rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>(
          store, val, adaptor.getOperands()[1]);
    } else {
      rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>(
          store, adaptor.getOperands()[0], adaptor.getOperands()[1]);
    }
    return mlir::success();
  }
};

namespace {

/// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for
/// the character buffer and one for the buffer length.
struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto *ctx = unboxchar.getContext();

    mlir::Type lenTy = convertType(unboxchar.getType(1));
    mlir::Value tuple = adaptor.getOperands()[0];
    mlir::Type tupleTy = tuple.getType();

    mlir::Location loc = unboxchar.getLoc();
    mlir::Value ptrToBuffer =
        genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 0);

    mlir::LLVM::ExtractValueOp len =
        genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 1);
    mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len);

    rewriter.replaceOp(unboxchar,
                       llvm::ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast});
    return mlir::success();
  }
};

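// Sketch of the fir.unboxchar rewrite above: the lowered boxchar is a
// {buffer pointer, length} struct, so the two results are just extractions at
// indices 0 and 1, with the length cast to the requested integer width.
// Types and SSA names below are illustrative only:
//
//   %buf = llvm.extractvalue %tuple[0] : !llvm.struct<(ptr<i8>, i64)>
//   %len = llvm.extractvalue %tuple[1] : !llvm.struct<(ptr<i8>, i64)>
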
/// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its
/// components.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(unboxproc.getLoc(), "fir.unboxproc codegen");
    return mlir::failure();
  }
};

/// Convert to LLVM IR dialect `undef`.
struct UndefOpConversion : public FIROpConversion<fir::UndefOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UndefOp undef, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>(
        undef, convertType(undef.getType()));
    return mlir::success();
  }
};

struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::ZeroOp zero, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type ty = convertType(zero.getType());
    if (ty.isa<mlir::LLVM::LLVMPointerType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty);
    } else if (ty.isa<mlir::IntegerType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
          zero, ty, mlir::IntegerAttr::get(zero.getType(), 0));
    } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
          zero, ty, mlir::FloatAttr::get(zero.getType(), 0.0));
    } else {
      // TODO: create ConstantAggregateZero for FIR aggregate/array types.
      return rewriter.notifyMatchFailure(
          zero,
          "conversion of fir.zero with aggregate type not implemented yet");
    }
    return mlir::success();
  }
};

/// `fir.unreachable` --> `llvm.unreachable`
struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach);
    return mlir::success();
  }
};

/// `fir.is_present` -->
/// ```
///  %0 = llvm.mlir.constant(0 : i64)
///  %1 = llvm.ptrtoint %arg0
///  %2 = llvm.icmp "ne" %1, %0 : i64
/// ```
struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type idxTy = lowerTy().indexType();
    mlir::Location loc = isPresent.getLoc();
    auto ptr = adaptor.getOperands()[0];

    if (isPresent.getVal().getType().isa<fir::BoxCharType>()) {
      auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>();
      assert(!structTy.isOpaque() && !structTy.getBody().empty());

      mlir::Type ty = structTy.getBody()[0];
      mlir::MLIRContext *ctx = isPresent.getContext();
      auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
      ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0);
    }
    mlir::LLVM::ConstantOp c0 =
        genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0);
    auto addr = rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr);
    rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
        isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0);

    return mlir::success();
  }
};

/// Create value signaling an absent optional argument in a call, e.g.
/// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>`
struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AbsentOp absent, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type ty = convertType(absent.getType());
    mlir::Location loc = absent.getLoc();

    if (absent.getType().isa<fir::BoxCharType>()) {
      auto structTy = ty.cast<mlir::LLVM::LLVMStructType>();
      assert(!structTy.isOpaque() && !structTy.getBody().empty());
      auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
      auto nullField =
          rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]);
      mlir::MLIRContext *ctx = absent.getContext();
      auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
      rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
          absent, ty, undefStruct, nullField, c0);
    } else {
      rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty);
    }
    return mlir::success();
  }
};

//
// Primitive operations on Complex types
//

/// Generate inline code for complex addition/subtraction
template <typename LLVMOP, typename OPTY>
static mlir::LLVM::InsertValueOp
complexSum(OPTY sumop, mlir::ValueRange opnds,
           mlir::ConversionPatternRewriter &rewriter,
           fir::LLVMTypeConverter &lowering) {
  mlir::Value a = opnds[0];
  mlir::Value b = opnds[1];
  auto loc = sumop.getLoc();
  auto ctx = sumop.getContext();
  auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
  auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
  mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType()));
  mlir::Type ty = lowering.convertType(sumop.getType());
  auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
  auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
  auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
  auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
  auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1);
  auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1);
  auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
  auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0);
  return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1);
}
} // namespace

namespace {
struct AddcOpConversion : public FIROpConversion<fir::AddcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // given: (x + iy) + (x' + iy')
    // result: (x + x') + i(y + y')
    auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(),
                                            rewriter, lowerTy());
    rewriter.replaceOp(addc, r.getResult());
    return mlir::success();
  }
};

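// Both fir.addc and fir.subc funnel through complexSum above: the real and
// imaginary parts are extracted from the lowered two-field struct, combined
// element-wise with llvm.fadd or llvm.fsub, and reassembled into a new struct
// value via llvm.insertvalue on an llvm.mlir.undef.
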
struct SubcOpConversion : public FIROpConversion<fir::SubcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // given: (x + iy) - (x' + iy')
    // result: (x - x') + i(y - y')
    auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(),
                                            rewriter, lowerTy());
    rewriter.replaceOp(subc, r.getResult());
    return mlir::success();
  }
};

/// Inlined complex multiply
struct MulcOpConversion : public FIROpConversion<fir::MulcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // TODO: Can we use a call to __muldc3?
    // given: (x + iy) * (x' + iy')
    // result: (xx' - yy') + i(xy' + yx')
    mlir::Value a = adaptor.getOperands()[0];
    mlir::Value b = adaptor.getOperands()[1];
    auto loc = mulc.getLoc();
    auto *ctx = mulc.getContext();
    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType()));
    mlir::Type ty = convertType(mulc.getType());
    auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
    auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
    auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
    auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
    auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
    auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
    auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
    auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx);
    auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
    auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy);
    auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
    auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
    rewriter.replaceOp(mulc, r0.getResult());
    return mlir::success();
  }
};

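// Worked example of the expansion above (values are illustrative only):
//   (1 + 2i) * (3 + 4i) = (1*3 - 2*4) + i(1*4 + 2*3) = -5 + 10i
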
/// Inlined complex division
struct DivcOpConversion : public FIROpConversion<fir::DivcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // TODO: Can we use a call to __divdc3 instead?
    // Just generate inline code for now.
    // given: (x + iy) / (x' + iy')
    // result: ((xx' + yy') / d) + i((yx' - xy') / d) where d = x'x' + y'y'
    mlir::Value a = adaptor.getOperands()[0];
    mlir::Value b = adaptor.getOperands()[1];
    auto loc = divc.getLoc();
    auto *ctx = divc.getContext();
    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    mlir::Type eleTy = convertType(getComplexEleTy(divc.getType()));
    mlir::Type ty = convertType(divc.getType());
    auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
    auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
    auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
    auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
    auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
    auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1);
    auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
    auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
    auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
    auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1);
    auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1);
    auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy);
    auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy);
    auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d);
    auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d);
    auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
    auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
    rewriter.replaceOp(divc, r0.getResult());
    return mlir::success();
  }
};

/// Inlined complex negation
struct NegcOpConversion : public FIROpConversion<fir::NegcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // given: -(x + iy)
    // result: -x - iy
    auto *ctxt = neg.getContext();
    auto eleTy = convertType(getComplexEleTy(neg.getType()));
    auto ty = convertType(neg.getType());
    auto loc = neg.getLoc();
    mlir::Value o0 = adaptor.getOperands()[0];
    auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
    auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0);
    auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1);
    auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp);
    auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip);
    auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1);
    return mlir::success();
  }
};

/// Conversion pattern for operations that must be dead. The information in
/// these operations has already been consumed by other operations, so at this
/// point they should not have any remaining uses.
/// These operations are normally dead after the pre-codegen pass.
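/// The fir.shape, fir.shape_shift, fir.shift, and fir.slice conversions below
/// all reuse this pattern: by this point their information is expected to have
/// been absorbed into the extended codegen operations produced by the
/// pre-codegen rewrite, so the ops themselves are simply erased.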
template <typename FromOp>
struct MustBeDeadConversion : public FIROpConversion<FromOp> {
  explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering,
                                const fir::FIRToLLVMPassOptions &options)
      : FIROpConversion<FromOp>(lowering, options) {}
  using OpAdaptor = typename FromOp::Adaptor;

  mlir::LogicalResult
  matchAndRewrite(FromOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const final {
    if (!op->getUses().empty())
      return rewriter.notifyMatchFailure(op, "op must be dead");
    rewriter.eraseOp(op);
    return mlir::success();
  }
};

struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> {
  using MustBeDeadConversion::MustBeDeadConversion;
};

struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> {
  using MustBeDeadConversion::MustBeDeadConversion;
};

struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> {
  using MustBeDeadConversion::MustBeDeadConversion;
};

struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> {
  using MustBeDeadConversion::MustBeDeadConversion;
};

} // namespace

namespace {
/// Convert FIR dialect to LLVM dialect
///
/// This pass lowers all FIR dialect operations to LLVM IR dialect. An
/// MLIR pass is used to lower residual Std dialect to LLVM IR dialect.
///
/// This pass is not complete yet. We are upstreaming it in small patches.
class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> {
public:
  FIRToLLVMLowering() = default;
  FIRToLLVMLowering(fir::FIRToLLVMPassOptions options) : options{options} {}
  mlir::ModuleOp getModule() { return getOperation(); }

  void runOnOperation() override final {
    auto mod = getModule();
    if (!forcedTargetTriple.empty())
      fir::setTargetTriple(mod, forcedTargetTriple);

    auto *context = getModule().getContext();
    fir::LLVMTypeConverter typeConverter{getModule()};
    mlir::RewritePatternSet pattern(context);
    pattern.insert<
        AbsentOpConversion, AddcOpConversion, AddrOfOpConversion,
        AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion,
        BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion,
        BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion,
        BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeDescOpConversion,
        CallOpConversion, CmpcOpConversion, ConstcOpConversion,
        ConvertOpConversion, CoordinateOpConversion, DispatchOpConversion,
        DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion,
        EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion,
        ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion,
        FreeMemOpConversion, GenTypeDescOpConversion, GlobalLenOpConversion,
        GlobalOpConversion, HasValueOpConversion, InsertOnRangeOpConversion,
        InsertValueOpConversion, IsPresentOpConversion,
        LenParamIndexOpConversion, LoadOpConversion, MulcOpConversion,
        NegcOpConversion, NoReassocOpConversion, SelectCaseOpConversion,
        SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion,
        ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion,
        SliceOpConversion, StoreOpConversion, StringLitOpConversion,
        SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion,
        UndefOpConversion, UnreachableOpConversion, XArrayCoorOpConversion,
        XEmboxOpConversion, XReboxOpConversion, ZeroOpConversion>(typeConverter,
                                                                  options);
    mlir::populateFuncToLLVMConversionPatterns(typeConverter, pattern);
    mlir::populateOpenMPToLLVMConversionPatterns(typeConverter, pattern);
    mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter,
                                                            pattern);
    mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter,
                                                          pattern);
    mlir::ConversionTarget target{*context};
    target.addLegalDialect<mlir::LLVM::LLVMDialect>();
    // The OpenMP dialect is legal for operations without regions; for those
    // that contain regions, it is legal if the regions contain only the LLVM
    // dialect. Add the OpenMP dialect as a legal dialect for the conversion
    // and legalize the conversion of OpenMP operations without regions.
    mlir::configureOpenMPToLLVMConversionLegality(target, typeConverter);
    target.addLegalDialect<mlir::omp::OpenMPDialect>();

    // Required NOPs for applying a full conversion.
    target.addLegalOp<mlir::ModuleOp>();

    // Apply the patterns.
    if (mlir::failed(mlir::applyFullConversion(getModule(), target,
                                               std::move(pattern)))) {
      signalPassFailure();
    }
  }

private:
  fir::FIRToLLVMPassOptions options;
};

/// Lower from LLVM IR dialect to proper LLVM-IR and dump the module
struct LLVMIRLoweringPass
    : public mlir::PassWrapper<LLVMIRLoweringPass,
                               mlir::OperationPass<mlir::ModuleOp>> {
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(LLVMIRLoweringPass)

  LLVMIRLoweringPass(llvm::raw_ostream &output, fir::LLVMIRLoweringPrinter p)
      : output{output}, printer{p} {}

  mlir::ModuleOp getModule() { return getOperation(); }

  void runOnOperation() override final {
    auto *ctx = getModule().getContext();
    auto optName = getModule().getName();
    llvm::LLVMContext llvmCtx;
    if (auto llvmModule = mlir::translateModuleToLLVMIR(
            getModule(), llvmCtx, optName ? *optName : "FIRModule")) {
      printer(*llvmModule, output);
      return;
    }

    mlir::emitError(mlir::UnknownLoc::get(ctx), "could not emit LLVM-IR\n");
    signalPassFailure();
  }

private:
  llvm::raw_ostream &output;
  fir::LLVMIRLoweringPrinter printer;
};

} // namespace

std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() {
  return std::make_unique<FIRToLLVMLowering>();
}

std::unique_ptr<mlir::Pass>
fir::createFIRToLLVMPass(fir::FIRToLLVMPassOptions options) {
  return std::make_unique<FIRToLLVMLowering>(options);
}

std::unique_ptr<mlir::Pass>
fir::createLLVMDialectToLLVMPass(llvm::raw_ostream &output,
                                 fir::LLVMIRLoweringPrinter printer) {
  return std::make_unique<LLVMIRLoweringPass>(output, printer);
}