//===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
//
//===----------------------------------------------------------------------===//

#include "flang/Optimizer/CodeGen/CodeGen.h"
#include "CGOps.h"
#include "PassDetail.h"
#include "flang/ISO_Fortran_binding.h"
#include "flang/Optimizer/Dialect/FIRAttr.h"
#include "flang/Optimizer/Dialect/FIROps.h"
#include "flang/Optimizer/Support/TypeCode.h"
#include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Pass/Pass.h"
#include "llvm/ADT/ArrayRef.h"

#define DEBUG_TYPE "flang-codegen"

// fir::LLVMTypeConverter for converting to LLVM IR dialect types.
#include "TypeConverter.h"

// TODO: This should really be recovered from the specified target.
static constexpr unsigned defaultAlign = 8;

/// `fir.box` attribute values as defined for CFI_attribute_t in
/// flang/ISO_Fortran_binding.h.
static constexpr unsigned kAttrPointer = CFI_attribute_pointer;
static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable;

static inline mlir::Type getVoidPtrType(mlir::MLIRContext *context) {
  return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8));
}

static mlir::LLVM::ConstantOp
genConstantIndex(mlir::Location loc, mlir::Type ity,
                 mlir::ConversionPatternRewriter &rewriter,
                 std::int64_t offset) {
  auto cattr = rewriter.getI64IntegerAttr(offset);
  return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
}

static Block *createBlock(mlir::ConversionPatternRewriter &rewriter,
                          mlir::Block *insertBefore) {
  assert(insertBefore && "expected valid insertion block");
  return rewriter.createBlock(insertBefore->getParent(),
                              mlir::Region::iterator(insertBefore));
}

namespace {
/// FIR conversion pattern template
template <typename FromOp>
class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
public:
  explicit FIROpConversion(fir::LLVMTypeConverter &lowering)
      : mlir::ConvertOpToLLVMPattern<FromOp>(lowering) {}

protected:
  mlir::Type convertType(mlir::Type ty) const {
    return lowerTy().convertType(ty);
  }
  mlir::Type voidPtrTy() const { return getVoidPtrType(); }

  mlir::Type getVoidPtrType() const {
    return mlir::LLVM::LLVMPointerType::get(
        mlir::IntegerType::get(&lowerTy().getContext(), 8));
  }

  mlir::LLVM::ConstantOp
  genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
                 int value) const {
    mlir::Type i32Ty = rewriter.getI32Type();
    mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr);
  }

  mlir::LLVM::ConstantOp
  genConstantOffset(mlir::Location loc,
                    mlir::ConversionPatternRewriter &rewriter,
                    int offset) const {
    mlir::Type ity = lowerTy().offsetType();
    mlir::IntegerAttr cattr =
        rewriter.getI32IntegerAttr(offset);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
  }

  /// Construct code sequence to extract the specific value from a `fir.box`.
  mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box,
                              mlir::Type resultTy,
                              mlir::ConversionPatternRewriter &rewriter,
                              unsigned boxValue) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cValuePos =
        genConstantOffset(loc, rewriter, boxValue);
    auto pty = mlir::LLVM::LLVMPointerType::get(resultTy);
    auto p = rewriter.create<mlir::LLVM::GEPOp>(
        loc, pty, box, mlir::ValueRange{c0, cValuePos});
    return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p);
  }

  /// Method to construct code sequence to get the triple for dimension `dim`
  /// from a box.
  SmallVector<mlir::Value, 3>
  getDimsFromBox(mlir::Location loc, ArrayRef<mlir::Type> retTys,
                 mlir::Value box, mlir::Value dim,
                 mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cDims =
        genConstantOffset(loc, rewriter, kDimsPosInBox);
    mlir::LLVM::LoadOp l0 =
        loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter);
    mlir::LLVM::LoadOp l1 =
        loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter);
    mlir::LLVM::LoadOp l2 =
        loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter);
    return {l0.getResult(), l1.getResult(), l2.getResult()};
  }

  mlir::LLVM::LoadOp
  loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0,
                 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off,
                 mlir::Type ty,
                 mlir::ConversionPatternRewriter &rewriter) const {
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  mlir::Value
  loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim,
                    mlir::ConversionPatternRewriter &rewriter) const {
    auto idxTy = lowerTy().indexType();
    auto c0 = genConstantOffset(loc, rewriter, 0);
    auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox);
    auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim);
    return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy,
                          rewriter);
  }

  /// Read base address from a fir.box. Returned address has type ty.
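  /// Roughly, the lowering below amounts to a GEP to the `kAddrPosInBox`
  /// field of the descriptor followed by a load of that field.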
  mlir::Value
  loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                      mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cAddr =
        genConstantOffset(loc, rewriter, kAddrPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  mlir::Value
  loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                         mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cElemLen =
        genConstantOffset(loc, rewriter, kElemLenPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  // Load the attribute from the \p box and perform a check against \p maskValue.
  // The final comparison is implemented as `(attribute & maskValue) != 0`.
  mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box,
                                   mlir::ConversionPatternRewriter &rewriter,
                                   unsigned maskValue) const {
    mlir::Type attrTy = rewriter.getI32Type();
    mlir::Value attribute =
        getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox);
    mlir::LLVM::ConstantOp attrMask =
        genConstantOffset(loc, rewriter, maskValue);
    auto maskRes =
        rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask);
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    return rewriter.create<mlir::LLVM::ICmpOp>(
        loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0);
  }

  // Get the element type given an LLVM type that is of the form
  // [llvm.ptr](array|struct|vector)+ and the provided indexes.
  static mlir::Type getBoxEleTy(mlir::Type type,
                                llvm::ArrayRef<unsigned> indexes) {
    if (auto t = type.dyn_cast<mlir::LLVM::LLVMPointerType>())
      type = t.getElementType();
    for (auto i : indexes) {
      if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) {
        assert(!t.isOpaque() && i < t.getBody().size());
        type = t.getBody()[i];
      } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
        type = t.getElementType();
      } else if (auto t = type.dyn_cast<mlir::VectorType>()) {
        type = t.getElementType();
      } else {
        fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()),
                            "request for invalid box element type");
      }
    }
    return type;
  }

  // Return LLVM type of the base address given the LLVM type
  // of the related descriptor (lowered fir.box type).
  static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) {
    return getBoxEleTy(type, {kAddrPosInBox});
  }

  template <typename... ARGS>
  mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty,
                           mlir::ConversionPatternRewriter &rewriter,
                           mlir::Value base, ARGS... args) const {
    SmallVector<mlir::Value> cv{args...};
    return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv);
  }

  /// Perform an extension or truncation as needed on an integer value. Lowering
  /// to the specific target may involve some sign-extending or truncation of
  /// values, particularly to fit them from abstract box types to the
  /// appropriate reified structures.
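  /// In practice this means a value narrower than `ty` is sign-extended, a
  /// wider one is truncated, and a same-width value is returned unchanged.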
  mlir::Value integerCast(mlir::Location loc,
                          mlir::ConversionPatternRewriter &rewriter,
                          mlir::Type ty, mlir::Value val) const {
    auto valTy = val.getType();
    // If the value was not yet lowered, lower its type so that it can
    // be used in getPrimitiveTypeSizeInBits.
    if (!valTy.isa<mlir::IntegerType>())
      valTy = convertType(valTy);
    auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
    auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy);
    if (toSize < fromSize)
      return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
    if (toSize > fromSize)
      return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
    return val;
  }

  fir::LLVMTypeConverter &lowerTy() const {
    return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter());
  }
};

/// FIR conversion pattern template
template <typename FromOp>
class FIROpAndTypeConversion : public FIROpConversion<FromOp> {
public:
  using FIROpConversion<FromOp>::FIROpConversion;
  using OpAdaptor = typename FromOp::Adaptor;

  mlir::LogicalResult
  matchAndRewrite(FromOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const final {
    mlir::Type ty = this->convertType(op.getType());
    return doRewrite(op, ty, adaptor, rewriter);
  }

  virtual mlir::LogicalResult
  doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const = 0;
};

/// Create value signaling an absent optional argument in a call, e.g.
/// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>`
struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AbsentOp absent, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type ty = convertType(absent.getType());
    mlir::Location loc = absent.getLoc();

    if (absent.getType().isa<fir::BoxCharType>()) {
      auto structTy = ty.cast<mlir::LLVM::LLVMStructType>();
      assert(!structTy.isOpaque() && !structTy.getBody().empty());
      auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
      auto nullField =
          rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]);
      mlir::MLIRContext *ctx = absent.getContext();
      auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
      rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
          absent, ty, undefStruct, nullField, c0);
    } else {
      rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty);
    }
    return success();
  }
};

// Lower `fir.address_of` operation to `llvm.mlir.addressof` operation.
struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(addr.getType());
    rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>(
        addr, ty, addr.symbol().getRootReference().getValue());
    return success();
  }
};
} // namespace

/// Lookup the function to compute the memory size of this parametric derived
/// type. The size of the object may depend on the LEN type parameters of the
/// derived type.
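/// The lookup is purely by name; the function is expected to be named after
/// the record type with a "P.mem.size" suffix (see the name built below).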
static mlir::LLVM::LLVMFuncOp
getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op,
                          mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  std::string name = recTy.getName().str() + "P.mem.size";
  return module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name);
}

namespace {
/// convert to LLVM IR dialect `alloca`
struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    auto loc = alloc.getLoc();
    mlir::Type ity = lowerTy().indexType();
    unsigned i = 0;
    mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult();
    mlir::Type ty = convertType(alloc.getType());
    mlir::Type resultTy = ty;
    if (alloc.hasLenParams()) {
      unsigned end = alloc.numLenParams();
      llvm::SmallVector<mlir::Value> lenParams;
      for (; i < end; ++i)
        lenParams.push_back(operands[i]);
      mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType());
      if (auto chrTy = scalarType.dyn_cast<fir::CharacterType>()) {
        fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen(
            chrTy.getContext(), chrTy.getFKind());
        ty = mlir::LLVM::LLVMPointerType::get(convertType(rawCharTy));
        assert(end == 1);
        size = integerCast(loc, rewriter, ity, lenParams[0]);
      } else if (auto recTy = scalarType.dyn_cast<fir::RecordType>()) {
        mlir::LLVM::LLVMFuncOp memSizeFn =
            getDependentTypeMemSizeFn(recTy, alloc, rewriter);
        if (!memSizeFn)
          emitError(loc, "did not find allocation function");
        mlir::NamedAttribute attr = rewriter.getNamedAttr(
            "callee", mlir::SymbolRefAttr::get(memSizeFn));
        auto call = rewriter.create<mlir::LLVM::CallOp>(
            loc, ity, lenParams, llvm::ArrayRef<mlir::NamedAttribute>{attr});
        size = call.getResult(0);
        ty = mlir::LLVM::LLVMPointerType::get(
            mlir::IntegerType::get(alloc.getContext(), 8));
      } else {
        return emitError(loc, "unexpected type ")
               << scalarType << " with type parameters";
      }
    }
    if (alloc.hasShapeOperands()) {
      mlir::Type allocEleTy = fir::unwrapRefType(alloc.getType());
      // Scale the size by constant factors encoded in the array type.
      // We only do this for arrays that don't have a constant interior, since
      // those are the only ones that get decayed to a pointer to the element
      // type.
      if (auto seqTy = allocEleTy.dyn_cast<fir::SequenceType>()) {
        if (!seqTy.hasConstantInterior()) {
          fir::SequenceType::Extent constSize = 1;
          for (auto extent : seqTy.getShape())
            if (extent != fir::SequenceType::getUnknownExtent())
              constSize *= extent;
          mlir::Value constVal{
              genConstantIndex(loc, ity, rewriter, constSize).getResult()};
          size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, constVal);
        }
      }
      unsigned end = operands.size();
      for (; i < end; ++i)
        size = rewriter.create<mlir::LLVM::MulOp>(
            loc, ity, size, integerCast(loc, rewriter, ity, operands[i]));
    }
    if (ty == resultTy) {
      // Do not emit the bitcast if ty and resultTy are the same.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AllocaOp>(alloc, ty, size,
                                                        alloc->getAttrs());
    } else {
      auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, ty, size,
                                                      alloc->getAttrs());
      rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(alloc, resultTy, al);
    }
    return success();
  }
};

/// Lower `fir.box_addr` to the sequence of operations to extract the first
/// element of the box.
struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxaddr.getLoc();
    mlir::Type ty = convertType(boxaddr.getType());
    if (auto argty = boxaddr.val().getType().dyn_cast<fir::BoxType>()) {
      rewriter.replaceOp(boxaddr, loadBaseAddrFromBox(loc, ty, a, rewriter));
    } else {
      auto c0attr = rewriter.getI32IntegerAttr(0);
      auto c0 = mlir::ArrayAttr::get(boxaddr.getContext(), c0attr);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, ty, a,
                                                              c0);
    }
    return success();
  }
};

/// Lower `fir.box_dims` to a sequence of operations to extract the requested
/// dimension information from the boxed value. The result is a triple of GEPs
/// and loads.
struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    SmallVector<mlir::Type, 3> resultTypes = {
        convertType(boxdims.getResult(0).getType()),
        convertType(boxdims.getResult(1).getType()),
        convertType(boxdims.getResult(2).getType()),
    };
    auto results =
        getDimsFromBox(boxdims.getLoc(), resultTypes, adaptor.getOperands()[0],
                       adaptor.getOperands()[1], rewriter);
    rewriter.replaceOp(boxdims, results);
    return success();
  }
};

/// Lower `fir.box_elesize` to a sequence of operations to extract the size of
/// an element in the boxed value.
struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxelesz.getLoc();
    auto ty = convertType(boxelesz.getType());
    auto elemSize = getValueFromBox(loc, a, ty, rewriter, kElemLenPosInBox);
    rewriter.replaceOp(boxelesz, elemSize);
    return success();
  }
};

/// Lower `fir.box_isalloc` to a sequence of operations to determine if the
/// boxed value was from an ALLOCATABLE entity.
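/// In essence, the attribute field of the descriptor is loaded and checked
/// with `(attribute & CFI_attribute_allocatable) != 0` (genBoxAttributeCheck).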
struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisalloc.getLoc();
    mlir::Value check =
        genBoxAttributeCheck(loc, box, rewriter, kAttrAllocatable);
    rewriter.replaceOp(boxisalloc, check);
    return success();
  }
};

/// Lower `fir.box_isarray` to a sequence of operations to determine if the
/// boxed value is an array.
struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxisarray.getLoc();
    auto rank =
        getValueFromBox(loc, a, rewriter.getI32Type(), rewriter, kRankPosInBox);
    auto c0 = genConstantOffset(loc, rewriter, 0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
        boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0);
    return success();
  }
};

/// Lower `fir.box_isptr` to a sequence of operations to determine if the
/// boxed value was from a POINTER entity.
struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisptr.getLoc();
    mlir::Value check = genBoxAttributeCheck(loc, box, rewriter, kAttrPointer);
    rewriter.replaceOp(boxisptr, check);
    return success();
  }
};

/// Lower `fir.box_rank` to the sequence of operations to extract the rank from
/// the box.
struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxrank.getLoc();
    mlir::Type ty = convertType(boxrank.getType());
    auto result = getValueFromBox(loc, a, ty, rewriter, kRankPosInBox);
    rewriter.replaceOp(boxrank, result);
    return success();
  }
};

/// Lower `fir.string_lit` to LLVM IR dialect operation.
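/// Roughly: a literal carried as a StringAttr becomes an `llvm.mlir.constant`
/// directly, while a literal given as an array of character codes is rebuilt
/// as a constant with a dense vector attribute of the character integer type.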
struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(constop.getType());
    auto attr = constop.getValue();
    if (attr.isa<mlir::StringAttr>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr);
      return success();
    }

    auto arr = attr.cast<mlir::ArrayAttr>();
    auto charTy = constop.getType().cast<fir::CharacterType>();
    unsigned bits = lowerTy().characterBitsize(charTy);
    mlir::Type intTy = rewriter.getIntegerType(bits);
    auto attrs = llvm::map_range(
        arr.getValue(), [intTy, bits](mlir::Attribute attr) -> Attribute {
          return mlir::IntegerAttr::get(
              intTy,
              attr.cast<mlir::IntegerAttr>().getValue().sextOrTrunc(bits));
        });
    mlir::Type vecType = mlir::VectorType::get(arr.size(), intTy);
    auto denseAttr = mlir::DenseElementsAttr::get(
        vecType.cast<mlir::ShapedType>(), llvm::to_vector<8>(attrs));
    rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>(constop, ty,
                                                         denseAttr);
    return success();
  }
};

/// Lower `fir.boxproc_host` operation. Extracts the host pointer from the
/// boxproc.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(boxprochost.getLoc(), "fir.boxproc_host codegen");
    return failure();
  }
};

/// Lower `fir.box_tdesc` to the sequence of operations to extract the type
/// descriptor from the box.
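/// The descriptor field at `kTypePosInBox` is loaded and then turned back
/// into a pointer with `llvm.inttoptr`.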
struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxtypedesc.getLoc();
    mlir::Type typeTy =
        fir::getDescFieldTypeModel<kTypePosInBox>()(boxtypedesc.getContext());
    auto result = getValueFromBox(loc, box, typeTy, rewriter, kTypePosInBox);
    auto typePtrTy = mlir::LLVM::LLVMPointerType::get(typeTy);
    rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(boxtypedesc, typePtrTy,
                                                        result);
    return success();
  }
};

// `fir.call` -> `llvm.call`
struct CallOpConversion : public FIROpConversion<fir::CallOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CallOp call, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    SmallVector<mlir::Type> resultTys;
    for (auto r : call.getResults())
      resultTys.push_back(convertType(r.getType()));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        call, resultTys, adaptor.getOperands(), call->getAttrs());
    return success();
  }
};
} // namespace

static mlir::Type getComplexEleTy(mlir::Type complex) {
  if (auto cc = complex.dyn_cast<mlir::ComplexType>())
    return cc.getElementType();
  return complex.cast<fir::ComplexType>().getElementType();
}

namespace {
/// Compare complex values
///
/// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une).
///
/// For completeness, all other comparisons are done on the real component
/// only.
struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    mlir::MLIRContext *ctxt = cmp.getContext();
    mlir::Type eleTy = convertType(getComplexEleTy(cmp.lhs().getType()));
    mlir::Type resTy = convertType(cmp.getType());
    mlir::Location loc = cmp.getLoc();
    auto pos0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
    SmallVector<mlir::Value, 2> rp{rewriter.create<mlir::LLVM::ExtractValueOp>(
                                       loc, eleTy, operands[0], pos0),
                                   rewriter.create<mlir::LLVM::ExtractValueOp>(
                                       loc, eleTy, operands[1], pos0)};
    auto rcp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, rp, cmp->getAttrs());
    auto pos1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
    SmallVector<mlir::Value, 2> ip{rewriter.create<mlir::LLVM::ExtractValueOp>(
                                       loc, eleTy, operands[0], pos1),
                                   rewriter.create<mlir::LLVM::ExtractValueOp>(
                                       loc, eleTy, operands[1], pos1)};
    auto icp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, ip, cmp->getAttrs());
    SmallVector<mlir::Value, 2> cp{rcp, icp};
    switch (cmp.getPredicate()) {
    case mlir::arith::CmpFPredicate::OEQ: // .EQ.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp);
      break;
    case mlir::arith::CmpFPredicate::UNE: // .NE.
      rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp);
      break;
    default:
      rewriter.replaceOp(cmp, rcp.getResult());
      break;
    }
    return success();
  }
};

/// Lower complex constants
struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::ConstcOp conc, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Location loc = conc.getLoc();
    mlir::MLIRContext *ctx = conc.getContext();
    mlir::Type ty = convertType(conc.getType());
    mlir::Type ety = convertType(getComplexEleTy(conc.getType()));
    auto realFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getReal()));
    auto realPart =
        rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, realFloatAttr);
    auto imFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getImaginary()));
    auto imPart =
        rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, imFloatAttr);
    auto realIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto imIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    auto undef = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto setReal = rewriter.create<mlir::LLVM::InsertValueOp>(
        loc, ty, undef, realPart, realIndex);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(conc, ty, setReal,
                                                           imPart, imIndex);
    return success();
  }

  inline APFloat getValue(mlir::Attribute attr) const {
    return attr.cast<fir::RealAttr>().getValue();
  }
};

/// convert value of from-type to value of to-type
struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> {
  using FIROpConversion::FIROpConversion;

  static bool isFloatingPointTy(mlir::Type ty) {
    return ty.isa<mlir::FloatType>();
  }

  mlir::LogicalResult
  matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto fromTy = convertType(convert.value().getType());
    auto toTy = convertType(convert.res().getType());
    mlir::Value op0 = adaptor.getOperands()[0];
    if (fromTy == toTy) {
      rewriter.replaceOp(convert, op0);
      return success();
    }
    auto loc = convert.getLoc();
    auto convertFpToFp = [&](mlir::Value val, unsigned fromBits,
                             unsigned toBits, mlir::Type toTy) -> mlir::Value {
      if (fromBits == toBits) {
        // TODO: Converting between two floating-point representations with the
        // same bitwidth is not allowed for now.
        mlir::emitError(loc,
                        "cannot implicitly convert between two floating-point "
                        "representations of the same bitwidth");
        return {};
      }
      if (fromBits > toBits)
        return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val);
      return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val);
    };
    // Complex to complex conversion.
    if (fir::isa_complex(convert.value().getType()) &&
        fir::isa_complex(convert.res().getType())) {
      // Special case: handle the conversion of a complex such that both the
      // real and imaginary parts are converted together.
      auto zero = mlir::ArrayAttr::get(convert.getContext(),
                                       rewriter.getI32IntegerAttr(0));
      auto one = mlir::ArrayAttr::get(convert.getContext(),
                                      rewriter.getI32IntegerAttr(1));
      auto ty = convertType(getComplexEleTy(convert.value().getType()));
      auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, zero);
      auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, one);
      auto nt = convertType(getComplexEleTy(convert.res().getType()));
      auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
      auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt);
      auto rc = convertFpToFp(rp, fromBits, toBits, nt);
      auto ic = convertFpToFp(ip, fromBits, toBits, nt);
      auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy);
      auto i1 =
          rewriter.create<mlir::LLVM::InsertValueOp>(loc, toTy, un, rc, zero);
      rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, toTy, i1,
                                                             ic, one);
      return mlir::success();
    }
    // Floating point to floating point conversion.
    if (isFloatingPointTy(fromTy)) {
      if (isFloatingPointTy(toTy)) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        auto v = convertFpToFp(op0, fromBits, toBits, toTy);
        rewriter.replaceOp(convert, v);
        return mlir::success();
      }
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::FPToSIOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::IntegerType>()) {
      // Integer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        assert(fromBits != toBits);
        if (fromBits > toBits) {
          rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0);
          return mlir::success();
        }
        rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to floating point conversion.
      if (isFloatingPointTy(toTy)) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::LLVM::LLVMPointerType>()) {
      // Pointer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Pointer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0);
        return mlir::success();
      }
    }
    return emitError(loc) << "cannot convert " << fromTy << " to " << toTy;
  }
};

/// Lower `fir.dispatch` operation. A virtual call to a method in a dispatch
/// table.
struct DispatchOpConversion : public FIROpConversion<fir::DispatchOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DispatchOp dispatch, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dispatch.getLoc(), "fir.dispatch codegen");
    return failure();
  }
};

/// Lower `fir.dispatch_table` operation.
/// The dispatch table for a Fortran derived type.
struct DispatchTableOpConversion
    : public FIROpConversion<fir::DispatchTableOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DispatchTableOp dispTab, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dispTab.getLoc(), "fir.dispatch_table codegen");
    return failure();
  }
};

/// Lower `fir.dt_entry` operation. An entry in a dispatch table; binds a
/// method-name to a function.
struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DTEntryOp dtEnt, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dtEnt.getLoc(), "fir.dt_entry codegen");
    return failure();
  }
};

/// Lower `fir.global_len` operation.
struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(globalLen.getLoc(), "fir.global_len codegen");
    return failure();
  }
};

/// Lower fir.len_param_index
struct LenParamIndexOpConversion
    : public FIROpConversion<fir::LenParamIndexOp> {
  using FIROpConversion::FIROpConversion;

  // FIXME: this should be specialized by the runtime target
  mlir::LogicalResult
  matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(lenp.getLoc(), "fir.len_param_index codegen");
  }
};

/// Lower `fir.gentypedesc` to a global constant.
struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen");
    return failure();
  }
};
} // namespace

/// Return the LLVMFuncOp corresponding to the standard malloc call.
static mlir::LLVM::LLVMFuncOp
getMalloc(fir::AllocMemOp op, mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  if (mlir::LLVM::LLVMFuncOp mallocFunc =
          module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("malloc"))
    return mallocFunc;
  mlir::OpBuilder moduleBuilder(
      op->getParentOfType<mlir::ModuleOp>().getBodyRegion());
  auto indexType = mlir::IntegerType::get(op.getContext(), 64);
  return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
      rewriter.getUnknownLoc(), "malloc",
      mlir::LLVM::LLVMFunctionType::get(getVoidPtrType(op.getContext()),
                                        indexType,
                                        /*isVarArg=*/false));
}

/// Helper function for generating the LLVM IR that computes the size
/// in bytes for a derived type.
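/// This is the classic null-pointer GEP idiom: roughly
/// `llvm.ptrtoint(llvm.getelementptr(null, 1))`, whose result is the byte
/// size of the pointee type.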
static mlir::Value
computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy,
                       mlir::ConversionPatternRewriter &rewriter) {
  auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
  mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
  llvm::SmallVector<mlir::Value> args{one};
  auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args);
  return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep);
}

namespace {
/// Lower a `fir.allocmem` instruction into `llvm.call @malloc`
struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type ty = convertType(heap.getType());
    mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter);
    mlir::Location loc = heap.getLoc();
    auto ity = lowerTy().indexType();
    if (auto recTy = fir::unwrapSequenceType(heap.getAllocatedType())
                         .dyn_cast<fir::RecordType>())
      if (recTy.getNumLenParams() != 0) {
        TODO(loc,
             "fir.allocmem codegen of derived type with length parameters");
        return failure();
      }
    mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty);
    for (mlir::Value opnd : adaptor.getOperands())
      size = rewriter.create<mlir::LLVM::MulOp>(
          loc, ity, size, integerCast(loc, rewriter, ity, opnd));
    heap->setAttr("callee", mlir::SymbolRefAttr::get(mallocFunc));
    auto malloc = rewriter.create<mlir::LLVM::CallOp>(
        loc, ::getVoidPtrType(heap.getContext()), size, heap->getAttrs());
    rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(heap, ty,
                                                       malloc.getResult(0));
    return success();
  }

  // Compute the (allocation) size of the allocmem type in bytes.
  mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy,
                                 mlir::ConversionPatternRewriter &rewriter,
                                 mlir::Type llTy) const {
    // Use the primitive size, if available.
    auto ptrTy = llTy.dyn_cast<mlir::LLVM::LLVMPointerType>();
    if (auto size =
            mlir::LLVM::getPrimitiveTypeSizeInBits(ptrTy.getElementType()))
      return genConstantIndex(loc, idxTy, rewriter, size / 8);

    // Otherwise, generate the GEP trick in LLVM IR to compute the size.
    return computeDerivedTypeSize(loc, ptrTy, idxTy, rewriter);
  }
};
} // namespace

/// Return the LLVMFuncOp corresponding to the standard free call.
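/// As with getMalloc above, an existing `free` symbol in the module is
/// reused; otherwise a declaration of `void free(i8 *)` is created at module
/// scope.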
static mlir::LLVM::LLVMFuncOp
getFree(fir::FreeMemOp op, mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  if (mlir::LLVM::LLVMFuncOp freeFunc =
          module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("free"))
    return freeFunc;
  mlir::OpBuilder moduleBuilder(module.getBodyRegion());
  auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext());
  return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
      rewriter.getUnknownLoc(), "free",
      mlir::LLVM::LLVMFunctionType::get(voidType,
                                        getVoidPtrType(op.getContext()),
                                        /*isVarArg=*/false));
}

namespace {
/// Lower a `fir.freemem` instruction into `llvm.call @free`
struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::LLVM::LLVMFuncOp freeFunc = getFree(freemem, rewriter);
    mlir::Location loc = freemem.getLoc();
    auto bitcast = rewriter.create<mlir::LLVM::BitcastOp>(
        freemem.getLoc(), voidPtrTy(), adaptor.getOperands()[0]);
    freemem->setAttr("callee", mlir::SymbolRefAttr::get(freeFunc));
    rewriter.create<mlir::LLVM::CallOp>(
        loc, mlir::TypeRange{}, mlir::ValueRange{bitcast}, freemem->getAttrs());
    rewriter.eraseOp(freemem);
    return success();
  }
};

/// Convert `fir.end`
struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(firEnd.getLoc(), "fir.end codegen");
    return failure();
  }
};

/// Lower `fir.has_value` operation to `llvm.return` operation.
struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<LLVM::ReturnOp>(op, adaptor.getOperands());
    return success();
  }
};

/// Lower `fir.global` operation to `llvm.global` operation.
/// `fir.insert_on_range` operations are replaced with a constant dense
/// attribute if they are applied on the full range.
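/// For instance, a single `fir.insert_on_range` that covers indices 0 through
/// extent-1 of every dimension is folded into one dense `arith.constant`
/// initializer (see isFullRange below).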
struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto tyAttr = convertType(global.getType());
    if (global.getType().isa<fir::BoxType>())
      tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType();
    auto loc = global.getLoc();
    mlir::Attribute initAttr{};
    if (global.initVal())
      initAttr = global.initVal().getValue();
    auto linkage = convertLinkage(global.linkName());
    auto isConst = global.constant().hasValue();
    auto g = rewriter.create<mlir::LLVM::GlobalOp>(
        loc, tyAttr, isConst, linkage, global.getSymName(), initAttr);
    auto &gr = g.getInitializerRegion();
    rewriter.inlineRegionBefore(global.region(), gr, gr.end());
    if (!gr.empty()) {
      // Replace insert_on_range with a constant dense attribute if the
      // initialization is on the full range.
      auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>();
      for (auto insertOp : insertOnRangeOps) {
        if (isFullRange(insertOp.coor(), insertOp.getType())) {
          auto seqTyAttr = convertType(insertOp.getType());
          auto *op = insertOp.val().getDefiningOp();
          auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op);
          if (!constant) {
            auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op);
            if (!convertOp)
              continue;
            constant = cast<mlir::arith::ConstantOp>(
                convertOp.value().getDefiningOp());
          }
          mlir::Type vecType = mlir::VectorType::get(
              insertOp.getType().getShape(), constant.getType());
          auto denseAttr = mlir::DenseElementsAttr::get(
              vecType.cast<ShapedType>(), constant.getValue());
          rewriter.setInsertionPointAfter(insertOp);
          rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>(
              insertOp, seqTyAttr, denseAttr);
        }
      }
    }
    rewriter.eraseOp(global);
    return success();
  }

  bool isFullRange(mlir::DenseIntElementsAttr indexes,
                   fir::SequenceType seqTy) const {
    auto extents = seqTy.getShape();
    if (indexes.size() / 2 != static_cast<int64_t>(extents.size()))
      return false;
    auto cur_index = indexes.value_begin<int64_t>();
    for (unsigned i = 0; i < indexes.size(); i += 2) {
      if (*(cur_index++) != 0)
        return false;
      if (*(cur_index++) != extents[i / 2] - 1)
        return false;
    }
    return true;
  }

  // TODO: String comparison should be avoided. Replace linkName with an
  // enumeration.
  mlir::LLVM::Linkage convertLinkage(Optional<StringRef> optLinkage) const {
    if (optLinkage.hasValue()) {
      auto name = optLinkage.getValue();
      if (name == "internal")
        return mlir::LLVM::Linkage::Internal;
      if (name == "linkonce")
        return mlir::LLVM::Linkage::Linkonce;
      if (name == "common")
        return mlir::LLVM::Linkage::Common;
      if (name == "weak")
        return mlir::LLVM::Linkage::Weak;
    }
    return mlir::LLVM::Linkage::External;
  }
};
} // namespace

static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest,
                        Optional<mlir::ValueRange> destOps,
                        mlir::ConversionPatternRewriter &rewriter,
                        mlir::Block *newBlock) {
  if (destOps.hasValue())
    rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(),
                                          newBlock, mlir::ValueRange());
  else
    rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock);
}

template <typename A, typename B>
static void genBrOp(A caseOp, mlir::Block *dest, Optional<B> destOps,
                    mlir::ConversionPatternRewriter &rewriter) {
  if (destOps.hasValue())
    rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(),
                                                  dest);
  else
    rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest);
}

static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp,
                              mlir::Block *dest,
                              Optional<mlir::ValueRange> destOps,
                              mlir::ConversionPatternRewriter &rewriter) {
  auto *thisBlock = rewriter.getInsertionBlock();
  auto *newBlock = createBlock(rewriter, dest);
  rewriter.setInsertionPointToEnd(thisBlock);
  genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock);
  rewriter.setInsertionPointToEnd(newBlock);
}

namespace {
/// Conversion of `fir.select_case`
///
/// The `fir.select_case` operation is converted to an if-then-else ladder.
/// Depending on the case condition type, one or several comparisons and
/// conditional branches can be generated.
///
/// A point value case such as `case(4)`, a lower bound case such as
/// `case(5:)`, or an upper bound case such as `case(:3)` is converted to a
/// simple comparison between the selector value and the constant value in the
/// case. The block associated with the case condition is then executed if
/// the comparison succeeds; otherwise it branches to the next block with the
/// comparison for the next case condition.
///
/// A closed interval case condition such as `case(7:10)` is converted with a
/// first comparison and conditional branching for the lower bound. If
/// successful, it branches to a second block with the comparison for the
/// upper bound in the same case condition.
///
/// TODO: lowering of CHARACTER type cases is not handled yet.
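///
/// Sketch of the ladder for `case(7:10)` (block and value names illustrative):
///   %lb = llvm.icmp "sle" %c7, %selector
///   llvm.cond_br %lb, ^checkUpper, ^nextCase
/// ^checkUpper:
///   %ub = llvm.icmp "sle" %selector, %c10
///   llvm.cond_br %ub, ^caseBlock, ^nextCase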
struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    unsigned conds = caseOp.getNumConditions();
    llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue();
    // Type can be CHARACTER, INTEGER, or LOGICAL (C1145)
    auto ty = caseOp.getSelector().getType();
    if (ty.isa<fir::CharacterType>()) {
      TODO(caseOp.getLoc(), "fir.select_case codegen with character type");
      return failure();
    }
    mlir::Value selector = caseOp.getSelector(adaptor.getOperands());
    auto loc = caseOp.getLoc();
    for (unsigned t = 0; t != conds; ++t) {
      mlir::Block *dest = caseOp.getSuccessor(t);
      llvm::Optional<mlir::ValueRange> destOps =
          caseOp.getSuccessorOperands(adaptor.getOperands(), t);
      llvm::Optional<mlir::ValueRange> cmpOps =
          *caseOp.getCompareOperands(adaptor.getOperands(), t);
      mlir::Value caseArg = *(cmpOps.getValue().begin());
      mlir::Attribute attr = cases[t];
      if (attr.isa<fir::PointIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::LowerBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::UpperBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::ClosedIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        auto *thisBlock = rewriter.getInsertionBlock();
        auto *newBlock1 = createBlock(rewriter, dest);
        auto *newBlock2 = createBlock(rewriter, dest);
        rewriter.setInsertionPointToEnd(thisBlock);
        rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock1);
        mlir::Value caseArg0 = *(cmpOps.getValue().begin() + 1);
        auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0);
        genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock2);
        continue;
      }
      assert(attr.isa<mlir::UnitAttr>());
      assert((t + 1 == conds) && "unit must be last");
      genBrOp(caseOp, dest, destOps, rewriter);
    }
    return success();
  }
};
} // namespace

template <typename OP>
static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select,
                                  typename OP::Adaptor adaptor,
                                  mlir::ConversionPatternRewriter &rewriter) {
  unsigned conds = select.getNumConditions();
  auto cases = select.getCases().getValue();
  mlir::Value selector = adaptor.selector();
  auto loc = select.getLoc();
  assert(conds > 0 && "select must have cases");

  llvm::SmallVector<mlir::Block *> destinations;
  llvm::SmallVector<mlir::ValueRange> destinationsOperands;
  mlir::Block *defaultDestination;
  mlir::ValueRange defaultOperands;
  llvm::SmallVector<int32_t> caseValues;

  for (unsigned t = 0; t != conds; ++t) {
    mlir::Block *dest = select.getSuccessor(t);
    auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t);
    const mlir::Attribute &attr = cases[t];
    if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) {
      destinations.push_back(dest);
      destinationsOperands.push_back(destOps.hasValue() ? *destOps
                                                        : ValueRange());
      caseValues.push_back(intAttr.getInt());
      continue;
    }
    assert(attr.template dyn_cast_or_null<mlir::UnitAttr>());
    assert((t + 1 == conds) && "unit must be last");
    defaultDestination = dest;
    defaultOperands = destOps.hasValue() ? *destOps : ValueRange();
  }

  // LLVM::SwitchOp takes an i32 type for the selector.
  if (select.getSelector().getType() != rewriter.getI32Type())
    selector =
        rewriter.create<LLVM::TruncOp>(loc, rewriter.getI32Type(), selector);

  rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>(
      select, selector,
      /*defaultDestination=*/defaultDestination,
      /*defaultOperands=*/defaultOperands,
      /*caseValues=*/caseValues,
      /*caseDestinations=*/destinations,
      /*caseOperands=*/destinationsOperands,
      /*branchWeights=*/ArrayRef<int32_t>());
}

namespace {
/// conversion of fir::SelectOp to an llvm.switch operation
struct SelectOpConversion : public FIROpConversion<fir::SelectOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter);
    return success();
  }
};

/// `fir.load` --> `llvm.load`
struct LoadOpConversion : public FIROpConversion<fir::LoadOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // fir.box is a special case because it is considered an SSA value in
    // FIR, but it is lowered as a pointer to a descriptor. So fir.ref<fir.box>
    // and fir.box end up being the same LLVM types, and loading a
    // fir.ref<fir.box> is actually a no-op in LLVM.
    if (load.getType().isa<fir::BoxType>()) {
      rewriter.replaceOp(load, adaptor.getOperands()[0]);
    } else {
      mlir::Type ty = convertType(load.getType());
      ArrayRef<NamedAttribute> at = load->getAttrs();
      rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>(
          load, ty, adaptor.getOperands(), at);
    }
    return success();
  }
};

/// Lower `fir.no_reassoc` to LLVM IR dialect.
/// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast
/// math flags?
struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]);
    return success();
  }
};

/// Lower `fir.select_type` to LLVM IR dialect.
struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::emitError(select.getLoc(),
                    "fir.select_type should have already been converted");
    return failure();
  }
};

/// conversion of fir::SelectRankOp to an llvm.switch operation
struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter);
    return success();
  }
};

/// `fir.store` --> `llvm.store`
struct StoreOpConversion : public FIROpConversion<fir::StoreOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    if (store.value().getType().isa<fir::BoxType>()) {
      // fir.box value is actually in memory, load it first before storing it.
      mlir::Location loc = store.getLoc();
      mlir::Type boxPtrTy = adaptor.getOperands()[0].getType();
      auto val = rewriter.create<mlir::LLVM::LoadOp>(
          loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(),
          adaptor.getOperands()[0]);
      rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>(
          store, val, adaptor.getOperands()[1]);
    } else {
      rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>(
          store, adaptor.getOperands()[0], adaptor.getOperands()[1]);
    }
    return success();
  }
};

/// convert to LLVM IR dialect `undef`
struct UndefOpConversion : public FIROpConversion<fir::UndefOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UndefOp undef, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>(
        undef, convertType(undef.getType()));
    return success();
  }
};

/// `fir.unreachable` --> `llvm.unreachable`
struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach);
    return success();
  }
};

struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::ZeroOp zero, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type ty = convertType(zero.getType());
    if (ty.isa<mlir::LLVM::LLVMPointerType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty);
    } else if (ty.isa<mlir::IntegerType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
          zero, ty, mlir::IntegerAttr::get(zero.getType(), 0));
    } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
          zero, ty, mlir::FloatAttr::get(zero.getType(),
0.0)); 1419 } else { 1420 // TODO: create ConstantAggregateZero for FIR aggregate/array types. 1421 return rewriter.notifyMatchFailure( 1422 zero, 1423 "conversion of fir.zero with aggregate type not implemented yet"); 1424 } 1425 return success(); 1426 } 1427 }; 1428 } // namespace 1429 1430 /// Common base class for embox to descriptor conversion. 1431 template <typename OP> 1432 struct EmboxCommonConversion : public FIROpConversion<OP> { 1433 using FIROpConversion<OP>::FIROpConversion; 1434 1435 // Find the LLVMFuncOp in whose entry block the alloca should be inserted. 1436 // The order to find the LLVMFuncOp is as follows: 1437 // 1. The parent operation of the current block if it is a LLVMFuncOp. 1438 // 2. The first ancestor that is a LLVMFuncOp. 1439 mlir::LLVM::LLVMFuncOp 1440 getFuncForAllocaInsert(mlir::ConversionPatternRewriter &rewriter) const { 1441 mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp(); 1442 return mlir::isa<mlir::LLVM::LLVMFuncOp>(parentOp) 1443 ? mlir::cast<mlir::LLVM::LLVMFuncOp>(parentOp) 1444 : parentOp->getParentOfType<mlir::LLVM::LLVMFuncOp>(); 1445 } 1446 1447 // Generate an alloca of size 1 and type \p toTy. 1448 mlir::LLVM::AllocaOp 1449 genAllocaWithType(mlir::Location loc, mlir::Type toTy, unsigned alignment, 1450 mlir::ConversionPatternRewriter &rewriter) const { 1451 auto thisPt = rewriter.saveInsertionPoint(); 1452 mlir::LLVM::LLVMFuncOp func = getFuncForAllocaInsert(rewriter); 1453 rewriter.setInsertionPointToStart(&func.front()); 1454 auto size = this->genI32Constant(loc, rewriter, 1); 1455 auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, toTy, size, alignment); 1456 rewriter.restoreInsertionPoint(thisPt); 1457 return al; 1458 } 1459 1460 static int getCFIAttr(fir::BoxType boxTy) { 1461 auto eleTy = boxTy.getEleTy(); 1462 if (eleTy.isa<fir::PointerType>()) 1463 return CFI_attribute_pointer; 1464 if (eleTy.isa<fir::HeapType>()) 1465 return CFI_attribute_allocatable; 1466 return CFI_attribute_other; 1467 } 1468 1469 static fir::RecordType unwrapIfDerived(fir::BoxType boxTy) { 1470 return fir::unwrapSequenceType(fir::dyn_cast_ptrOrBoxEleTy(boxTy)) 1471 .template dyn_cast<fir::RecordType>(); 1472 } 1473 static bool isDerivedTypeWithLenParams(fir::BoxType boxTy) { 1474 auto recTy = unwrapIfDerived(boxTy); 1475 return recTy && recTy.getNumLenParams() > 0; 1476 } 1477 static bool isDerivedType(fir::BoxType boxTy) { 1478 return unwrapIfDerived(boxTy) != nullptr; 1479 } 1480 1481 // Get the element size and CFI type code of the boxed value. 
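  // For example (purely illustrative): a 32-bit REAL element yields a byte
  // size of 4 and the code produced by fir::realBitsToTypeCode(32), while a
  // CHARACTER(KIND=1) element yields its (possibly runtime) length in bytes
  // and the code from fir::characterBitsToTypeCode(8). The codes are the
  // standard CFI_type_* values.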
1482 std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode( 1483 mlir::Location loc, mlir::ConversionPatternRewriter &rewriter, 1484 mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const { 1485 auto doInteger = 1486 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1487 int typeCode = fir::integerBitsToTypeCode(width); 1488 return {this->genConstantOffset(loc, rewriter, width / 8), 1489 this->genConstantOffset(loc, rewriter, typeCode)}; 1490 }; 1491 auto doLogical = 1492 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1493 int typeCode = fir::logicalBitsToTypeCode(width); 1494 return {this->genConstantOffset(loc, rewriter, width / 8), 1495 this->genConstantOffset(loc, rewriter, typeCode)}; 1496 }; 1497 auto doFloat = [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1498 int typeCode = fir::realBitsToTypeCode(width); 1499 return {this->genConstantOffset(loc, rewriter, width / 8), 1500 this->genConstantOffset(loc, rewriter, typeCode)}; 1501 }; 1502 auto doComplex = 1503 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1504 auto typeCode = fir::complexBitsToTypeCode(width); 1505 return {this->genConstantOffset(loc, rewriter, width / 8 * 2), 1506 this->genConstantOffset(loc, rewriter, typeCode)}; 1507 }; 1508 auto doCharacter = 1509 [&](unsigned width, 1510 mlir::Value len) -> std::tuple<mlir::Value, mlir::Value> { 1511 auto typeCode = fir::characterBitsToTypeCode(width); 1512 auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode); 1513 if (width == 8) 1514 return {len, typeCodeVal}; 1515 auto byteWidth = this->genConstantOffset(loc, rewriter, width / 8); 1516 auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64); 1517 auto size = 1518 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len); 1519 return {size, typeCodeVal}; 1520 }; 1521 auto getKindMap = [&]() -> fir::KindMapping & { 1522 return this->lowerTy().getKindMap(); 1523 }; 1524 // Pointer-like types. 1525 if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy)) 1526 boxEleTy = eleTy; 1527 // Integer types. 1528 if (fir::isa_integer(boxEleTy)) { 1529 if (auto ty = boxEleTy.dyn_cast<mlir::IntegerType>()) 1530 return doInteger(ty.getWidth()); 1531 auto ty = boxEleTy.cast<fir::IntegerType>(); 1532 return doInteger(getKindMap().getIntegerBitsize(ty.getFKind())); 1533 } 1534 // Floating point types. 1535 if (fir::isa_real(boxEleTy)) { 1536 if (auto ty = boxEleTy.dyn_cast<mlir::FloatType>()) 1537 return doFloat(ty.getWidth()); 1538 auto ty = boxEleTy.cast<fir::RealType>(); 1539 return doFloat(getKindMap().getRealBitsize(ty.getFKind())); 1540 } 1541 // Complex types. 1542 if (fir::isa_complex(boxEleTy)) { 1543 if (auto ty = boxEleTy.dyn_cast<mlir::ComplexType>()) 1544 return doComplex( 1545 ty.getElementType().cast<mlir::FloatType>().getWidth()); 1546 auto ty = boxEleTy.cast<fir::ComplexType>(); 1547 return doComplex(getKindMap().getRealBitsize(ty.getFKind())); 1548 } 1549 // Character types. 1550 if (auto ty = boxEleTy.dyn_cast<fir::CharacterType>()) { 1551 auto charWidth = getKindMap().getCharacterBitsize(ty.getFKind()); 1552 if (ty.getLen() != fir::CharacterType::unknownLen()) { 1553 auto len = this->genConstantOffset(loc, rewriter, ty.getLen()); 1554 return doCharacter(charWidth, len); 1555 } 1556 assert(!lenParams.empty()); 1557 return doCharacter(charWidth, lenParams.back()); 1558 } 1559 // Logical type. 
1560 if (auto ty = boxEleTy.dyn_cast<fir::LogicalType>()) 1561 return doLogical(getKindMap().getLogicalBitsize(ty.getFKind())); 1562 // Array types. 1563 if (auto seqTy = boxEleTy.dyn_cast<fir::SequenceType>()) 1564 return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams); 1565 // Derived-type types. 1566 if (boxEleTy.isa<fir::RecordType>()) { 1567 auto ptrTy = mlir::LLVM::LLVMPointerType::get( 1568 this->lowerTy().convertType(boxEleTy)); 1569 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 1570 auto one = 1571 genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1); 1572 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, 1573 mlir::ValueRange{one}); 1574 auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>( 1575 loc, this->lowerTy().indexType(), gep); 1576 return {eleSize, 1577 this->genConstantOffset(loc, rewriter, fir::derivedToTypeCode())}; 1578 } 1579 // Reference type. 1580 if (fir::isa_ref_type(boxEleTy)) { 1581 // FIXME: use the target pointer size rather than sizeof(void*) 1582 return {this->genConstantOffset(loc, rewriter, sizeof(void *)), 1583 this->genConstantOffset(loc, rewriter, CFI_type_cptr)}; 1584 } 1585 fir::emitFatalError(loc, "unhandled type in fir.box code generation"); 1586 } 1587 1588 /// Basic pattern to write a field in the descriptor 1589 mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter, 1590 mlir::Location loc, mlir::Value dest, 1591 ArrayRef<unsigned> fldIndexes, mlir::Value value, 1592 bool bitcast = false) const { 1593 auto boxTy = dest.getType(); 1594 auto fldTy = this->getBoxEleTy(boxTy, fldIndexes); 1595 if (bitcast) 1596 value = rewriter.create<mlir::LLVM::BitcastOp>(loc, fldTy, value); 1597 else 1598 value = this->integerCast(loc, rewriter, fldTy, value); 1599 SmallVector<mlir::Attribute, 2> attrs; 1600 for (auto i : fldIndexes) 1601 attrs.push_back(rewriter.getI32IntegerAttr(i)); 1602 auto indexesAttr = mlir::ArrayAttr::get(rewriter.getContext(), attrs); 1603 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, boxTy, dest, value, 1604 indexesAttr); 1605 } 1606 1607 inline mlir::Value 1608 insertBaseAddress(mlir::ConversionPatternRewriter &rewriter, 1609 mlir::Location loc, mlir::Value dest, 1610 mlir::Value base) const { 1611 return insertField(rewriter, loc, dest, {kAddrPosInBox}, base, 1612 /*bitCast=*/true); 1613 } 1614 1615 inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter, 1616 mlir::Location loc, mlir::Value dest, 1617 unsigned dim, mlir::Value lb) const { 1618 return insertField(rewriter, loc, dest, 1619 {kDimsPosInBox, dim, kDimLowerBoundPos}, lb); 1620 } 1621 1622 inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter, 1623 mlir::Location loc, mlir::Value dest, 1624 unsigned dim, mlir::Value extent) const { 1625 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos}, 1626 extent); 1627 } 1628 1629 inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter, 1630 mlir::Location loc, mlir::Value dest, 1631 unsigned dim, mlir::Value stride) const { 1632 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos}, 1633 stride); 1634 } 1635 1636 /// Get the address of the type descriptor global variable that was created by 1637 /// lowering for derived type \p recType. 
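  /// A minimal sketch of the emitted IR (the symbol name below is
  /// illustrative only):
  ///   %0 = llvm.mlir.addressof @<lowered.type.descriptor.name> : !llvm.ptr<...>
  /// When no definition of the global is visible in the current module, an
  /// extern_weak i8 global with that name is created first and its address is
  /// returned instead.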
  template <typename BOX>
  mlir::Value
  getTypeDescriptor(BOX box, mlir::ConversionPatternRewriter &rewriter,
                    mlir::Location loc, fir::RecordType recType) const {
    std::string name = recType.getLoweredName();
    auto module = box->template getParentOfType<mlir::ModuleOp>();
    if (auto global = module.template lookupSymbol<fir::GlobalOp>(name)) {
      auto ty = mlir::LLVM::LLVMPointerType::get(
          this->lowerTy().convertType(global.getType()));
      return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
                                                      global.getSymName());
    }
    if (auto global =
            module.template lookupSymbol<mlir::LLVM::GlobalOp>(name)) {
      // The global may have already been translated to LLVM.
      auto ty = mlir::LLVM::LLVMPointerType::get(global.getType());
      return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
                                                      global.getSymName());
    }
    // The global does not exist in the current translation unit, but may be
    // defined elsewhere (e.g., type defined in a module).
    // For now, create an extern_weak symbol (will become nullptr if unresolved)
    // to support generating code without the front-end generated symbols.
    // These could be made available_externally to require the symbols to be
    // defined elsewhere and to cause link-time failure otherwise.
    auto i8Ty = rewriter.getIntegerType(8);
    mlir::OpBuilder modBuilder(module.getBodyRegion());
    // TODO: These symbols should be lowered to constants in lowering; they are
    // read-only.
    modBuilder.create<mlir::LLVM::GlobalOp>(loc, i8Ty, /*isConstant=*/false,
                                            mlir::LLVM::Linkage::ExternWeak,
                                            name, mlir::Attribute{});
    auto ty = mlir::LLVM::LLVMPointerType::get(i8Ty);
    return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, name);
  }

  template <typename BOX>
  std::tuple<fir::BoxType, mlir::Value, mlir::Value>
  consDescriptorPrefix(BOX box, mlir::ConversionPatternRewriter &rewriter,
                       unsigned rank, mlir::ValueRange lenParams) const {
    auto loc = box.getLoc();
    auto boxTy = box.getType().template dyn_cast<fir::BoxType>();
    auto convTy = this->lowerTy().convertBoxType(boxTy, rank);
    auto llvmBoxPtrTy = convTy.template cast<mlir::LLVM::LLVMPointerType>();
    auto llvmBoxTy = llvmBoxPtrTy.getElementType();
    mlir::Value descriptor =
        rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy);

    llvm::SmallVector<mlir::Value> typeparams = lenParams;
    if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) {
      if (!box.substr().empty() && fir::hasDynamicSize(boxTy.getEleTy()))
        typeparams.push_back(box.substr()[1]);
    }

    // Write each of the fields with the appropriate values.
    auto [eleSize, cfiTy] =
        getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams);
    descriptor =
        insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize);
    descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox},
                             this->genI32Constant(loc, rewriter, CFI_VERSION));
    descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox},
                             this->genI32Constant(loc, rewriter, rank));
    descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy);
    descriptor =
        insertField(rewriter, loc, descriptor, {kAttributePosInBox},
                    this->genI32Constant(loc, rewriter, getCFIAttr(boxTy)));
    const bool hasAddendum = isDerivedType(boxTy);
    descriptor =
        insertField(rewriter, loc, descriptor, {kF18AddendumPosInBox},
this->genI32Constant(loc, rewriter, hasAddendum ? 1 : 0)); 1709 1710 if (hasAddendum) { 1711 auto isArray = 1712 fir::dyn_cast_ptrOrBoxEleTy(boxTy).template isa<fir::SequenceType>(); 1713 unsigned typeDescFieldId = isArray ? kOptTypePtrPosInBox : kDimsPosInBox; 1714 auto typeDesc = 1715 getTypeDescriptor(box, rewriter, loc, unwrapIfDerived(boxTy)); 1716 descriptor = 1717 insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc, 1718 /*bitCast=*/true); 1719 } 1720 1721 return {boxTy, descriptor, eleSize}; 1722 } 1723 1724 /// Compute the base address of a substring given the base address of a scalar 1725 /// string and the zero based string lower bound. 1726 mlir::Value shiftSubstringBase(mlir::ConversionPatternRewriter &rewriter, 1727 mlir::Location loc, mlir::Value base, 1728 mlir::Value lowerBound) const { 1729 llvm::SmallVector<mlir::Value> gepOperands; 1730 auto baseType = 1731 base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType(); 1732 if (baseType.isa<mlir::LLVM::LLVMArrayType>()) { 1733 auto idxTy = this->lowerTy().indexType(); 1734 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 1735 gepOperands.push_back(zero); 1736 } 1737 gepOperands.push_back(lowerBound); 1738 return this->genGEP(loc, base.getType(), rewriter, base, gepOperands); 1739 } 1740 1741 /// If the embox is not in a globalOp body, allocate storage for the box; 1742 /// store the value inside and return the generated alloca. Return the input 1743 /// value otherwise. 1744 mlir::Value 1745 placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter, 1746 mlir::Location loc, mlir::Value boxValue) const { 1747 auto *thisBlock = rewriter.getInsertionBlock(); 1748 if (thisBlock && mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp())) 1749 return boxValue; 1750 auto boxPtrTy = mlir::LLVM::LLVMPointerType::get(boxValue.getType()); 1751 auto alloca = genAllocaWithType(loc, boxPtrTy, defaultAlign, rewriter); 1752 rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, alloca); 1753 return alloca; 1754 } 1755 }; 1756 1757 /// Compute the extent of a triplet slice (lb:ub:step). 1758 static mlir::Value 1759 computeTripletExtent(mlir::ConversionPatternRewriter &rewriter, 1760 mlir::Location loc, mlir::Value lb, mlir::Value ub, 1761 mlir::Value step, mlir::Value zero, mlir::Type type) { 1762 mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb); 1763 extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step); 1764 extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step); 1765 // If the resulting extent is negative (`ub-lb` and `step` have different 1766 // signs), zero must be returned instead. 1767 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 1768 loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero); 1769 return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero); 1770 } 1771 1772 /// Create a generic box on a memory reference. This conversions lowers the 1773 /// abstract box to the appropriate, initialized descriptor. 
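/// A rough sketch for a scalar embox (types and SSA names are illustrative):
///   %box = fir.embox %ref : (!fir.ref<f32>) -> !fir.box<f32>
/// lowers to a chain of llvm.insertvalue operations that populate the element
/// size, version, rank (0 here), CFI type code, attribute, and addendum flag
/// of the descriptor struct, followed by the base address; the resulting value
/// is stored into a stack slot unless the embox appears in a global
/// initializer.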
1774 struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> { 1775 using EmboxCommonConversion::EmboxCommonConversion; 1776 1777 mlir::LogicalResult 1778 matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor, 1779 mlir::ConversionPatternRewriter &rewriter) const override { 1780 assert(!embox.getShape() && "There should be no dims on this embox op"); 1781 auto [boxTy, dest, eleSize] = 1782 consDescriptorPrefix(embox, rewriter, /*rank=*/0, 1783 /*lenParams=*/adaptor.getOperands().drop_front(1)); 1784 dest = insertBaseAddress(rewriter, embox.getLoc(), dest, 1785 adaptor.getOperands()[0]); 1786 if (isDerivedTypeWithLenParams(boxTy)) { 1787 TODO(embox.getLoc(), 1788 "fir.embox codegen of derived with length parameters"); 1789 return failure(); 1790 } 1791 auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest); 1792 rewriter.replaceOp(embox, result); 1793 return success(); 1794 } 1795 }; 1796 1797 /// Lower `fir.emboxproc` operation. Creates a procedure box. 1798 /// TODO: Part of supporting Fortran 2003 procedure pointers. 1799 struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> { 1800 using FIROpConversion::FIROpConversion; 1801 1802 mlir::LogicalResult 1803 matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor, 1804 mlir::ConversionPatternRewriter &rewriter) const override { 1805 TODO(emboxproc.getLoc(), "fir.emboxproc codegen"); 1806 return failure(); 1807 } 1808 }; 1809 1810 /// Create a generic box on a memory reference. 1811 struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> { 1812 using EmboxCommonConversion::EmboxCommonConversion; 1813 1814 mlir::LogicalResult 1815 matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor, 1816 mlir::ConversionPatternRewriter &rewriter) const override { 1817 auto [boxTy, dest, eleSize] = consDescriptorPrefix( 1818 xbox, rewriter, xbox.getOutRank(), 1819 adaptor.getOperands().drop_front(xbox.lenParamOffset())); 1820 // Generate the triples in the dims field of the descriptor 1821 mlir::ValueRange operands = adaptor.getOperands(); 1822 auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64); 1823 mlir::Value base = operands[0]; 1824 assert(!xbox.shape().empty() && "must have a shape"); 1825 unsigned shapeOffset = xbox.shapeOffset(); 1826 bool hasShift = !xbox.shift().empty(); 1827 unsigned shiftOffset = xbox.shiftOffset(); 1828 bool hasSlice = !xbox.slice().empty(); 1829 unsigned sliceOffset = xbox.sliceOffset(); 1830 mlir::Location loc = xbox.getLoc(); 1831 mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0); 1832 mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1); 1833 mlir::Value prevDim = integerCast(loc, rewriter, i64Ty, eleSize); 1834 mlir::Value prevPtrOff = one; 1835 mlir::Type eleTy = boxTy.getEleTy(); 1836 const unsigned rank = xbox.getRank(); 1837 llvm::SmallVector<mlir::Value> gepArgs; 1838 unsigned constRows = 0; 1839 mlir::Value ptrOffset = zero; 1840 if (auto memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType())) 1841 if (auto seqTy = memEleTy.dyn_cast<fir::SequenceType>()) { 1842 mlir::Type seqEleTy = seqTy.getEleTy(); 1843 // Adjust the element scaling factor if the element is a dependent type. 
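        // For example (illustrative): for a CHARACTER(KIND=1,LEN=n) element,
        // the per-element scaling factor is the length operand `n`; derived
        // types with length parameters are not supported here yet.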
1844 if (fir::hasDynamicSize(seqEleTy)) { 1845 if (fir::isa_char(seqEleTy)) { 1846 assert(xbox.lenParams().size() == 1); 1847 prevPtrOff = integerCast(loc, rewriter, i64Ty, 1848 operands[xbox.lenParamOffset()]); 1849 } else if (seqEleTy.isa<fir::RecordType>()) { 1850 TODO(loc, "generate call to calculate size of PDT"); 1851 } else { 1852 return rewriter.notifyMatchFailure(xbox, "unexpected dynamic type"); 1853 } 1854 } else { 1855 constRows = seqTy.getConstantRows(); 1856 } 1857 } 1858 1859 bool hasSubcomp = !xbox.subcomponent().empty(); 1860 mlir::Value stepExpr; 1861 if (hasSubcomp) { 1862 // We have a subcomponent. The step value needs to be the number of 1863 // bytes per element (which is a derived type). 1864 mlir::Type ty0 = base.getType(); 1865 [[maybe_unused]] auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 1866 assert(ptrTy && "expected pointer type"); 1867 mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()); 1868 assert(memEleTy && "expected fir pointer type"); 1869 auto seqTy = memEleTy.dyn_cast<fir::SequenceType>(); 1870 assert(seqTy && "expected sequence type"); 1871 mlir::Type seqEleTy = seqTy.getEleTy(); 1872 auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy)); 1873 stepExpr = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter); 1874 } 1875 1876 // Process the array subspace arguments (shape, shift, etc.), if any, 1877 // translating everything to values in the descriptor wherever the entity 1878 // has a dynamic array dimension. 1879 for (unsigned di = 0, descIdx = 0; di < rank; ++di) { 1880 mlir::Value extent = operands[shapeOffset]; 1881 mlir::Value outerExtent = extent; 1882 bool skipNext = false; 1883 if (hasSlice) { 1884 mlir::Value off = operands[sliceOffset]; 1885 mlir::Value adj = one; 1886 if (hasShift) 1887 adj = operands[shiftOffset]; 1888 auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj); 1889 if (constRows > 0) { 1890 gepArgs.push_back(ao); 1891 --constRows; 1892 } else { 1893 auto dimOff = 1894 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff); 1895 ptrOffset = 1896 rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset); 1897 } 1898 if (mlir::isa_and_nonnull<fir::UndefOp>( 1899 xbox.slice()[3 * di + 1].getDefiningOp())) { 1900 // This dimension contains a scalar expression in the array slice op. 1901 // The dimension is loop invariant, will be dropped, and will not 1902 // appear in the descriptor. 1903 skipNext = true; 1904 } 1905 } 1906 if (!skipNext) { 1907 // store lower bound (normally 0) 1908 mlir::Value lb = zero; 1909 if (eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>()) { 1910 lb = one; 1911 if (hasShift) 1912 lb = operands[shiftOffset]; 1913 } 1914 dest = insertLowerBound(rewriter, loc, dest, descIdx, lb); 1915 1916 // store extent 1917 if (hasSlice) 1918 extent = computeTripletExtent(rewriter, loc, operands[sliceOffset], 1919 operands[sliceOffset + 1], 1920 operands[sliceOffset + 2], zero, i64Ty); 1921 dest = insertExtent(rewriter, loc, dest, descIdx, extent); 1922 1923 // store step (scaled by shaped extent) 1924 1925 mlir::Value step = hasSubcomp ? 
stepExpr : prevDim; 1926 if (hasSlice) 1927 step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step, 1928 operands[sliceOffset + 2]); 1929 dest = insertStride(rewriter, loc, dest, descIdx, step); 1930 ++descIdx; 1931 } 1932 1933 // compute the stride and offset for the next natural dimension 1934 prevDim = 1935 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevDim, outerExtent); 1936 if (constRows == 0) 1937 prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff, 1938 outerExtent); 1939 1940 // increment iterators 1941 ++shapeOffset; 1942 if (hasShift) 1943 ++shiftOffset; 1944 if (hasSlice) 1945 sliceOffset += 3; 1946 } 1947 if (hasSlice || hasSubcomp || !xbox.substr().empty()) { 1948 llvm::SmallVector<mlir::Value> args = {ptrOffset}; 1949 args.append(gepArgs.rbegin(), gepArgs.rend()); 1950 if (hasSubcomp) { 1951 // For each field in the path add the offset to base via the args list. 1952 // In the most general case, some offsets must be computed since 1953 // they are not be known until runtime. 1954 if (fir::hasDynamicSize(fir::unwrapSequenceType( 1955 fir::unwrapPassByRefType(xbox.memref().getType())))) 1956 TODO(loc, "fir.embox codegen dynamic size component in derived type"); 1957 args.append(operands.begin() + xbox.subcomponentOffset(), 1958 operands.begin() + xbox.subcomponentOffset() + 1959 xbox.subcomponent().size()); 1960 } 1961 base = 1962 rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args); 1963 if (!xbox.substr().empty()) 1964 base = shiftSubstringBase(rewriter, loc, base, 1965 operands[xbox.substrOffset()]); 1966 } 1967 dest = insertBaseAddress(rewriter, loc, dest, base); 1968 if (isDerivedTypeWithLenParams(boxTy)) 1969 TODO(loc, "fir.embox codegen of derived with length parameters"); 1970 1971 mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest); 1972 rewriter.replaceOp(xbox, result); 1973 return success(); 1974 } 1975 }; 1976 1977 /// Create a new box given a box reference. 1978 struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> { 1979 using EmboxCommonConversion::EmboxCommonConversion; 1980 1981 mlir::LogicalResult 1982 matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor, 1983 mlir::ConversionPatternRewriter &rewriter) const override { 1984 mlir::Location loc = rebox.getLoc(); 1985 mlir::Type idxTy = lowerTy().indexType(); 1986 mlir::Value loweredBox = adaptor.getOperands()[0]; 1987 mlir::ValueRange operands = adaptor.getOperands(); 1988 1989 // Create new descriptor and fill its non-shape related data. 
1990 llvm::SmallVector<mlir::Value, 2> lenParams; 1991 mlir::Type inputEleTy = getInputEleTy(rebox); 1992 if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) { 1993 mlir::Value len = 1994 loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter); 1995 if (charTy.getFKind() != 1) { 1996 mlir::Value width = 1997 genConstantIndex(loc, idxTy, rewriter, charTy.getFKind()); 1998 len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width); 1999 } 2000 lenParams.emplace_back(len); 2001 } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) { 2002 if (recTy.getNumLenParams() != 0) 2003 TODO(loc, "reboxing descriptor of derived type with length parameters"); 2004 } 2005 auto [boxTy, dest, eleSize] = 2006 consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams); 2007 2008 // Read input extents, strides, and base address 2009 llvm::SmallVector<mlir::Value> inputExtents; 2010 llvm::SmallVector<mlir::Value> inputStrides; 2011 const unsigned inputRank = rebox.getRank(); 2012 for (unsigned i = 0; i < inputRank; ++i) { 2013 mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i); 2014 SmallVector<mlir::Value, 3> dimInfo = 2015 getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter); 2016 inputExtents.emplace_back(dimInfo[1]); 2017 inputStrides.emplace_back(dimInfo[2]); 2018 } 2019 2020 mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType()); 2021 mlir::Value baseAddr = 2022 loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter); 2023 2024 if (!rebox.slice().empty() || !rebox.subcomponent().empty()) 2025 return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides, 2026 operands, rewriter); 2027 return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides, 2028 operands, rewriter); 2029 } 2030 2031 private: 2032 /// Write resulting shape and base address in descriptor, and replace rebox 2033 /// op. 2034 mlir::LogicalResult 2035 finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 2036 mlir::ValueRange lbounds, mlir::ValueRange extents, 2037 mlir::ValueRange strides, 2038 mlir::ConversionPatternRewriter &rewriter) const { 2039 mlir::Location loc = rebox.getLoc(); 2040 mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1); 2041 for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) { 2042 unsigned dim = iter.index(); 2043 mlir::Value lb = lbounds.empty() ? one : lbounds[dim]; 2044 dest = insertLowerBound(rewriter, loc, dest, dim, lb); 2045 dest = insertExtent(rewriter, loc, dest, dim, std::get<0>(iter.value())); 2046 dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value())); 2047 } 2048 dest = insertBaseAddress(rewriter, loc, dest, base); 2049 mlir::Value result = 2050 placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest); 2051 rewriter.replaceOp(rebox, result); 2052 return success(); 2053 } 2054 2055 // Apply slice given the base address, extents and strides of the input box. 2056 mlir::LogicalResult 2057 sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 2058 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 2059 mlir::ValueRange operands, 2060 mlir::ConversionPatternRewriter &rewriter) const { 2061 mlir::Location loc = rebox.getLoc(); 2062 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 2063 mlir::Type idxTy = lowerTy().indexType(); 2064 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2065 // Apply subcomponent and substring shift on base address. 
2066 if (!rebox.subcomponent().empty() || !rebox.substr().empty()) { 2067 // Cast to inputEleTy* so that a GEP can be used. 2068 mlir::Type inputEleTy = getInputEleTy(rebox); 2069 auto llvmElePtrTy = 2070 mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy)); 2071 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base); 2072 2073 if (!rebox.subcomponent().empty()) { 2074 llvm::SmallVector<mlir::Value> gepOperands = {zero}; 2075 for (unsigned i = 0; i < rebox.subcomponent().size(); ++i) 2076 gepOperands.push_back(operands[rebox.subcomponentOffset() + i]); 2077 base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands); 2078 } 2079 if (!rebox.substr().empty()) 2080 base = shiftSubstringBase(rewriter, loc, base, 2081 operands[rebox.substrOffset()]); 2082 } 2083 2084 if (rebox.slice().empty()) 2085 // The array section is of the form array[%component][substring], keep 2086 // the input array extents and strides. 2087 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 2088 inputExtents, inputStrides, rewriter); 2089 2090 // Strides from the fir.box are in bytes. 2091 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2092 2093 // The slice is of the form array(i:j:k)[%component]. Compute new extents 2094 // and strides. 2095 llvm::SmallVector<mlir::Value> slicedExtents; 2096 llvm::SmallVector<mlir::Value> slicedStrides; 2097 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2098 const bool sliceHasOrigins = !rebox.shift().empty(); 2099 unsigned sliceOps = rebox.sliceOffset(); 2100 unsigned shiftOps = rebox.shiftOffset(); 2101 auto strideOps = inputStrides.begin(); 2102 const unsigned inputRank = inputStrides.size(); 2103 for (unsigned i = 0; i < inputRank; 2104 ++i, ++strideOps, ++shiftOps, sliceOps += 3) { 2105 mlir::Value sliceLb = 2106 integerCast(loc, rewriter, idxTy, operands[sliceOps]); 2107 mlir::Value inputStride = *strideOps; // already idxTy 2108 // Apply origin shift: base += (lb-shift)*input_stride 2109 mlir::Value sliceOrigin = 2110 sliceHasOrigins 2111 ? integerCast(loc, rewriter, idxTy, operands[shiftOps]) 2112 : one; 2113 mlir::Value diff = 2114 rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin); 2115 mlir::Value offset = 2116 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride); 2117 base = genGEP(loc, voidPtrTy, rewriter, base, offset); 2118 // Apply upper bound and step if this is a triplet. Otherwise, the 2119 // dimension is dropped and no extents/strides are computed. 2120 mlir::Value upper = operands[sliceOps + 1]; 2121 const bool isTripletSlice = 2122 !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp()); 2123 if (isTripletSlice) { 2124 mlir::Value step = 2125 integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]); 2126 // extent = ub-lb+step/step 2127 mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper); 2128 mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb, 2129 sliceUb, step, zero, idxTy); 2130 slicedExtents.emplace_back(extent); 2131 // stride = step*input_stride 2132 mlir::Value stride = 2133 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride); 2134 slicedStrides.emplace_back(stride); 2135 } 2136 } 2137 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 2138 slicedExtents, slicedStrides, rewriter); 2139 } 2140 2141 /// Apply a new shape to the data described by a box given the base address, 2142 /// extents and strides of the box. 
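  /// Minimal sketch (assuming a contiguous input): reshaping a vector of 4
  /// elements into a 2x2 array keeps the input stride for the first dimension
  /// and derives each following stride as extent * stride of the previous
  /// dimension, then writes the new lower bounds, extents, strides, and base
  /// address into the descriptor.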
2143 mlir::LogicalResult 2144 reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 2145 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 2146 mlir::ValueRange operands, 2147 mlir::ConversionPatternRewriter &rewriter) const { 2148 mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(), 2149 operands.begin() + rebox.shiftOffset() + 2150 rebox.shift().size()}; 2151 if (rebox.shape().empty()) { 2152 // Only setting new lower bounds. 2153 return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents, 2154 inputStrides, rewriter); 2155 } 2156 2157 mlir::Location loc = rebox.getLoc(); 2158 // Strides from the fir.box are in bytes. 2159 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 2160 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2161 2162 llvm::SmallVector<mlir::Value> newStrides; 2163 llvm::SmallVector<mlir::Value> newExtents; 2164 mlir::Type idxTy = lowerTy().indexType(); 2165 // First stride from input box is kept. The rest is assumed contiguous 2166 // (it is not possible to reshape otherwise). If the input is scalar, 2167 // which may be OK if all new extents are ones, the stride does not 2168 // matter, use one. 2169 mlir::Value stride = inputStrides.empty() 2170 ? genConstantIndex(loc, idxTy, rewriter, 1) 2171 : inputStrides[0]; 2172 for (unsigned i = 0; i < rebox.shape().size(); ++i) { 2173 mlir::Value rawExtent = operands[rebox.shapeOffset() + i]; 2174 mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent); 2175 newExtents.emplace_back(extent); 2176 newStrides.emplace_back(stride); 2177 // nextStride = extent * stride; 2178 stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride); 2179 } 2180 return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides, 2181 rewriter); 2182 } 2183 2184 /// Return scalar element type of the input box. 2185 static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) { 2186 auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType()); 2187 if (auto seqTy = ty.dyn_cast<fir::SequenceType>()) 2188 return seqTy.getEleTy(); 2189 return ty; 2190 } 2191 }; 2192 2193 // Code shared between insert_value and extract_value Ops. 2194 struct ValueOpCommon { 2195 // Translate the arguments pertaining to any multidimensional array to 2196 // row-major order for LLVM-IR. 
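  // For example (illustrative): coordinates {i, j} addressing an element of a
  // value lowered to !llvm.array<4 x array<5 x i32>> are reversed to {j, i},
  // since FIR coordinates follow Fortran's column-major convention while LLVM
  // aggregates are indexed row-major.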
2197 static void toRowMajor(SmallVectorImpl<mlir::Attribute> &attrs, 2198 mlir::Type ty) { 2199 assert(ty && "type is null"); 2200 const auto end = attrs.size(); 2201 for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) { 2202 if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 2203 const auto dim = getDimension(seq); 2204 if (dim > 1) { 2205 auto ub = std::min(i + dim, end); 2206 std::reverse(attrs.begin() + i, attrs.begin() + ub); 2207 i += dim - 1; 2208 } 2209 ty = getArrayElementType(seq); 2210 } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) { 2211 ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()]; 2212 } else { 2213 llvm_unreachable("index into invalid type"); 2214 } 2215 } 2216 } 2217 2218 static llvm::SmallVector<mlir::Attribute> 2219 collectIndices(mlir::ConversionPatternRewriter &rewriter, 2220 mlir::ArrayAttr arrAttr) { 2221 llvm::SmallVector<mlir::Attribute> attrs; 2222 for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) { 2223 if (i->isa<mlir::IntegerAttr>()) { 2224 attrs.push_back(*i); 2225 } else { 2226 auto fieldName = i->cast<mlir::StringAttr>().getValue(); 2227 ++i; 2228 auto ty = i->cast<mlir::TypeAttr>().getValue(); 2229 auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName); 2230 attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index)); 2231 } 2232 } 2233 return attrs; 2234 } 2235 2236 private: 2237 static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) { 2238 unsigned result = 1; 2239 for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>(); 2240 eleTy; 2241 eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>()) 2242 ++result; 2243 return result; 2244 } 2245 2246 static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) { 2247 auto eleTy = ty.getElementType(); 2248 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2249 eleTy = arrTy.getElementType(); 2250 return eleTy; 2251 } 2252 }; 2253 2254 namespace { 2255 /// Extract a subobject value from an ssa-value of aggregate type 2256 struct ExtractValueOpConversion 2257 : public FIROpAndTypeConversion<fir::ExtractValueOp>, 2258 public ValueOpCommon { 2259 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2260 2261 mlir::LogicalResult 2262 doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor, 2263 mlir::ConversionPatternRewriter &rewriter) const override { 2264 auto attrs = collectIndices(rewriter, extractVal.coor()); 2265 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 2266 auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs); 2267 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>( 2268 extractVal, ty, adaptor.getOperands()[0], position); 2269 return success(); 2270 } 2271 }; 2272 2273 /// InsertValue is the generalized instruction for the composition of new 2274 /// aggregate type values. 
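/// A minimal sketch (syntax abbreviated for illustration): a fir.insert_value
/// that writes one component of a two-field derived type value becomes a
/// single llvm.insertvalue on the corresponding !llvm.struct, after
/// collectIndices has turned field-name coordinates into integer positions and
/// toRowMajor has reordered any array coordinates.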
2275 struct InsertValueOpConversion 2276 : public FIROpAndTypeConversion<fir::InsertValueOp>, 2277 public ValueOpCommon { 2278 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2279 2280 mlir::LogicalResult 2281 doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor, 2282 mlir::ConversionPatternRewriter &rewriter) const override { 2283 auto attrs = collectIndices(rewriter, insertVal.coor()); 2284 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 2285 auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs); 2286 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 2287 insertVal, ty, adaptor.getOperands()[0], adaptor.getOperands()[1], 2288 position); 2289 return success(); 2290 } 2291 }; 2292 2293 /// InsertOnRange inserts a value into a sequence over a range of offsets. 2294 struct InsertOnRangeOpConversion 2295 : public FIROpAndTypeConversion<fir::InsertOnRangeOp> { 2296 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2297 2298 // Increments an array of subscripts in a row major fasion. 2299 void incrementSubscripts(const SmallVector<uint64_t> &dims, 2300 SmallVector<uint64_t> &subscripts) const { 2301 for (size_t i = dims.size(); i > 0; --i) { 2302 if (++subscripts[i - 1] < dims[i - 1]) { 2303 return; 2304 } 2305 subscripts[i - 1] = 0; 2306 } 2307 } 2308 2309 mlir::LogicalResult 2310 doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor, 2311 mlir::ConversionPatternRewriter &rewriter) const override { 2312 2313 llvm::SmallVector<uint64_t> dims; 2314 auto type = adaptor.getOperands()[0].getType(); 2315 2316 // Iteratively extract the array dimensions from the type. 2317 while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 2318 dims.push_back(t.getNumElements()); 2319 type = t.getElementType(); 2320 } 2321 2322 SmallVector<uint64_t> lBounds; 2323 SmallVector<uint64_t> uBounds; 2324 2325 // Unzip the upper and lower bound and convert to a row major format. 2326 mlir::DenseIntElementsAttr coor = range.coor(); 2327 auto reversedCoor = llvm::reverse(coor.getValues<int64_t>()); 2328 for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) { 2329 uBounds.push_back(*i++); 2330 lBounds.push_back(*i); 2331 } 2332 2333 auto &subscripts = lBounds; 2334 auto loc = range.getLoc(); 2335 mlir::Value lastOp = adaptor.getOperands()[0]; 2336 mlir::Value insertVal = adaptor.getOperands()[1]; 2337 2338 auto i64Ty = rewriter.getI64Type(); 2339 while (subscripts != uBounds) { 2340 // Convert uint64_t's to Attribute's. 2341 SmallVector<mlir::Attribute> subscriptAttrs; 2342 for (const auto &subscript : subscripts) 2343 subscriptAttrs.push_back(IntegerAttr::get(i64Ty, subscript)); 2344 lastOp = rewriter.create<mlir::LLVM::InsertValueOp>( 2345 loc, ty, lastOp, insertVal, 2346 ArrayAttr::get(range.getContext(), subscriptAttrs)); 2347 2348 incrementSubscripts(dims, subscripts); 2349 } 2350 2351 // Convert uint64_t's to Attribute's. 2352 SmallVector<mlir::Attribute> subscriptAttrs; 2353 for (const auto &subscript : subscripts) 2354 subscriptAttrs.push_back( 2355 IntegerAttr::get(rewriter.getI64Type(), subscript)); 2356 mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs); 2357 2358 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 2359 range, ty, lastOp, insertVal, 2360 ArrayAttr::get(range.getContext(), arrayRef)); 2361 2362 return success(); 2363 } 2364 }; 2365 } // namespace 2366 2367 /// XArrayCoor is the address arithmetic on a dynamically shaped, sliced, 2368 /// shifted etc. array. 
2369 /// (See the static restriction on coordinate_of.) array_coor determines the 2370 /// coordinate (location) of a specific element. 2371 struct XArrayCoorOpConversion 2372 : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> { 2373 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2374 2375 mlir::LogicalResult 2376 doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor, 2377 mlir::ConversionPatternRewriter &rewriter) const override { 2378 auto loc = coor.getLoc(); 2379 mlir::ValueRange operands = adaptor.getOperands(); 2380 unsigned rank = coor.getRank(); 2381 assert(coor.indices().size() == rank); 2382 assert(coor.shape().empty() || coor.shape().size() == rank); 2383 assert(coor.shift().empty() || coor.shift().size() == rank); 2384 assert(coor.slice().empty() || coor.slice().size() == 3 * rank); 2385 mlir::Type idxTy = lowerTy().indexType(); 2386 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2387 mlir::Value prevExt = one; 2388 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2389 mlir::Value offset = zero; 2390 const bool isShifted = !coor.shift().empty(); 2391 const bool isSliced = !coor.slice().empty(); 2392 const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>(); 2393 2394 auto indexOps = coor.indices().begin(); 2395 auto shapeOps = coor.shape().begin(); 2396 auto shiftOps = coor.shift().begin(); 2397 auto sliceOps = coor.slice().begin(); 2398 // For each dimension of the array, generate the offset calculation. 2399 for (unsigned i = 0; i < rank; 2400 ++i, ++indexOps, ++shapeOps, ++shiftOps, sliceOps += 3) { 2401 mlir::Value index = 2402 integerCast(loc, rewriter, idxTy, operands[coor.indicesOffset() + i]); 2403 mlir::Value lb = isShifted ? integerCast(loc, rewriter, idxTy, 2404 operands[coor.shiftOffset() + i]) 2405 : one; 2406 mlir::Value step = one; 2407 bool normalSlice = isSliced; 2408 // Compute zero based index in dimension i of the element, applying 2409 // potential triplets and lower bounds. 2410 if (isSliced) { 2411 mlir::Value ub = *(sliceOps + 1); 2412 normalSlice = !mlir::isa_and_nonnull<fir::UndefOp>(ub.getDefiningOp()); 2413 if (normalSlice) 2414 step = integerCast(loc, rewriter, idxTy, *(sliceOps + 2)); 2415 } 2416 auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb); 2417 mlir::Value diff = 2418 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step); 2419 if (normalSlice) { 2420 mlir::Value sliceLb = 2421 integerCast(loc, rewriter, idxTy, operands[coor.sliceOffset() + i]); 2422 auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb); 2423 diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj); 2424 } 2425 // Update the offset given the stride and the zero based index `diff` 2426 // that was just computed. 2427 if (baseIsBoxed) { 2428 // Use stride in bytes from the descriptor. 2429 mlir::Value stride = 2430 loadStrideFromBox(loc, adaptor.getOperands()[0], i, rewriter); 2431 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride); 2432 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2433 } else { 2434 // Use stride computed at last iteration. 2435 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt); 2436 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2437 // Compute next stride assuming contiguity of the base array 2438 // (in element number). 
        auto nextExt =
            integerCast(loc, rewriter, idxTy, operands[coor.shapeOffset() + i]);
        prevExt =
            rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt);
      }
    }

    // Add computed offset to the base address.
    if (baseIsBoxed) {
      // Working with byte offsets. The base address is read from the fir.box
      // and needs to be cast to i8* before doing the pointer arithmetic.
      mlir::Type baseTy =
          getBaseAddrTypeFromBox(adaptor.getOperands()[0].getType());
      mlir::Value base =
          loadBaseAddrFromBox(loc, baseTy, adaptor.getOperands()[0], rewriter);
      mlir::Type voidPtrTy = getVoidPtrType();
      base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
      llvm::SmallVector<mlir::Value> args{offset};
      auto addr =
          rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args);
      if (coor.subcomponent().empty()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, baseTy, addr);
        return success();
      }
      auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr);
      args.clear();
      args.push_back(zero);
      if (!coor.lenParams().empty()) {
        // If type parameters are present, then we don't want to use a GEPOp
        // as below, as the LLVM struct type cannot be statically defined.
        TODO(loc, "derived type with type parameters");
      }
      // TODO: array offset subcomponents must be converted to LLVM's
      // row-major layout here.
      for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
        args.push_back(operands[i]);
      rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, baseTy, casted,
                                                     args);
      return success();
    }

    // The array was not boxed, so it must be contiguous. offset is therefore an
    // element offset and the base type is kept in the GEP unless the element
    // type size is itself dynamic.
    mlir::Value base;
    if (coor.subcomponent().empty()) {
      // No subcomponent.
      if (!coor.lenParams().empty()) {
        // Type parameters. Adjust element size explicitly.
        auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType());
        assert(eleTy && "result must be a reference-like type");
        if (fir::characterWithDynamicLen(eleTy)) {
          assert(coor.lenParams().size() == 1);
          auto bitsInChar = lowerTy().getKindMap().getCharacterBitsize(
              eleTy.cast<fir::CharacterType>().getFKind());
          auto scaling = genConstantIndex(loc, idxTy, rewriter, bitsInChar / 8);
          auto scaledBySize =
              rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, scaling);
          auto length =
              integerCast(loc, rewriter, idxTy,
                          adaptor.getOperands()[coor.lenParamsOffset()]);
          offset = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, scaledBySize,
                                                      length);
        } else {
          TODO(loc, "compute size of derived type with type parameters");
        }
      }
      // Cast the base address to a pointer to T.
      base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty,
                                                    adaptor.getOperands()[0]);
    } else {
      // Operand #0 must have a pointer type. For subcomponent slicing, we
      // want to cast away the array type and have a plain struct type.
2512 mlir::Type ty0 = adaptor.getOperands()[0].getType(); 2513 auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 2514 assert(ptrTy && "expected pointer type"); 2515 mlir::Type eleTy = ptrTy.getElementType(); 2516 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2517 eleTy = arrTy.getElementType(); 2518 auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy); 2519 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy, 2520 adaptor.getOperands()[0]); 2521 } 2522 SmallVector<mlir::Value> args = {offset}; 2523 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2524 args.push_back(operands[i]); 2525 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args); 2526 return success(); 2527 } 2528 }; 2529 2530 // 2531 // Primitive operations on Complex types 2532 // 2533 2534 /// Generate inline code for complex addition/subtraction 2535 template <typename LLVMOP, typename OPTY> 2536 static mlir::LLVM::InsertValueOp 2537 complexSum(OPTY sumop, mlir::ValueRange opnds, 2538 mlir::ConversionPatternRewriter &rewriter, 2539 fir::LLVMTypeConverter &lowering) { 2540 mlir::Value a = opnds[0]; 2541 mlir::Value b = opnds[1]; 2542 auto loc = sumop.getLoc(); 2543 auto ctx = sumop.getContext(); 2544 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 2545 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 2546 mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType())); 2547 mlir::Type ty = lowering.convertType(sumop.getType()); 2548 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 2549 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 2550 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 2551 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 2552 auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1); 2553 auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1); 2554 auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 2555 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0); 2556 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1); 2557 } 2558 2559 namespace { 2560 struct AddcOpConversion : public FIROpConversion<fir::AddcOp> { 2561 using FIROpConversion::FIROpConversion; 2562 2563 mlir::LogicalResult 2564 matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor, 2565 mlir::ConversionPatternRewriter &rewriter) const override { 2566 // given: (x + iy) + (x' + iy') 2567 // result: (x + x') + i(y + y') 2568 auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(), 2569 rewriter, lowerTy()); 2570 rewriter.replaceOp(addc, r.getResult()); 2571 return success(); 2572 } 2573 }; 2574 2575 struct SubcOpConversion : public FIROpConversion<fir::SubcOp> { 2576 using FIROpConversion::FIROpConversion; 2577 2578 mlir::LogicalResult 2579 matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor, 2580 mlir::ConversionPatternRewriter &rewriter) const override { 2581 // given: (x + iy) - (x' + iy') 2582 // result: (x - x') + i(y - y') 2583 auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(), 2584 rewriter, lowerTy()); 2585 rewriter.replaceOp(subc, r.getResult()); 2586 return success(); 2587 } 2588 }; 2589 2590 /// Inlined complex multiply 2591 struct MulcOpConversion : public FIROpConversion<fir::MulcOp> { 2592 using FIROpConversion::FIROpConversion; 2593 2594 mlir::LogicalResult 2595 matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor, 2596 mlir::ConversionPatternRewriter 
&rewriter) const override { 2597 // TODO: Can we use a call to __muldc3 ? 2598 // given: (x + iy) * (x' + iy') 2599 // result: (xx'-yy')+i(xy'+yx') 2600 mlir::Value a = adaptor.getOperands()[0]; 2601 mlir::Value b = adaptor.getOperands()[1]; 2602 auto loc = mulc.getLoc(); 2603 auto *ctx = mulc.getContext(); 2604 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 2605 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 2606 mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType())); 2607 mlir::Type ty = convertType(mulc.getType()); 2608 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 2609 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 2610 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 2611 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 2612 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 2613 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 2614 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 2615 auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx); 2616 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 2617 auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy); 2618 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 2619 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 2620 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 2621 rewriter.replaceOp(mulc, r0.getResult()); 2622 return success(); 2623 } 2624 }; 2625 2626 /// Inlined complex division 2627 struct DivcOpConversion : public FIROpConversion<fir::DivcOp> { 2628 using FIROpConversion::FIROpConversion; 2629 2630 mlir::LogicalResult 2631 matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor, 2632 mlir::ConversionPatternRewriter &rewriter) const override { 2633 // TODO: Can we use a call to __divdc3 instead? 2634 // Just generate inline code for now. 
    // given: (x + iy) / (x' + iy')
    // result: ((xx'+yy')/d) + i((yx'-xy')/d) where d = x'x' + y'y'
    mlir::Value a = adaptor.getOperands()[0];
    mlir::Value b = adaptor.getOperands()[1];
    auto loc = divc.getLoc();
    auto *ctx = divc.getContext();
    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    mlir::Type eleTy = convertType(getComplexEleTy(divc.getType()));
    mlir::Type ty = convertType(divc.getType());
    auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
    auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
    auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
    auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
    auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
    auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1);
    auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
    auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
    auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
    auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1);
    auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1);
    auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy);
    auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy);
    auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d);
    auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d);
    auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
    auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
    rewriter.replaceOp(divc, r0.getResult());
    return success();
  }
};

/// Inlined complex negation
struct NegcOpConversion : public FIROpConversion<fir::NegcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // given: -(x + iy)
    // result: -x - iy
    auto *ctxt = neg.getContext();
    auto eleTy = convertType(getComplexEleTy(neg.getType()));
    auto ty = convertType(neg.getType());
    auto loc = neg.getLoc();
    mlir::Value o0 = adaptor.getOperands()[0];
    auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
    auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0);
    auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1);
    auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp);
    auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip);
    auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1);
    return success();
  }
};

/// Conversion pattern for operations that must be dead by the time codegen
/// runs. The information in these operations is consumed by other operations,
/// so they should not have any remaining uses at this point.
/// These operations are normally dead after the pre-codegen pass.
2698 template <typename FromOp> 2699 struct MustBeDeadConversion : public FIROpConversion<FromOp> { 2700 explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering) 2701 : FIROpConversion<FromOp>(lowering) {} 2702 using OpAdaptor = typename FromOp::Adaptor; 2703 2704 mlir::LogicalResult 2705 matchAndRewrite(FromOp op, OpAdaptor adaptor, 2706 mlir::ConversionPatternRewriter &rewriter) const final { 2707 if (!op->getUses().empty()) 2708 return rewriter.notifyMatchFailure(op, "op must be dead"); 2709 rewriter.eraseOp(op); 2710 return success(); 2711 } 2712 }; 2713 2714 struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> { 2715 using MustBeDeadConversion::MustBeDeadConversion; 2716 }; 2717 2718 struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> { 2719 using MustBeDeadConversion::MustBeDeadConversion; 2720 }; 2721 2722 struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> { 2723 using MustBeDeadConversion::MustBeDeadConversion; 2724 }; 2725 2726 struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> { 2727 using MustBeDeadConversion::MustBeDeadConversion; 2728 }; 2729 2730 /// `fir.is_present` --> 2731 /// ``` 2732 /// %0 = llvm.mlir.constant(0 : i64) 2733 /// %1 = llvm.ptrtoint %0 2734 /// %2 = llvm.icmp "ne" %1, %0 : i64 2735 /// ``` 2736 struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> { 2737 using FIROpConversion::FIROpConversion; 2738 2739 mlir::LogicalResult 2740 matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor, 2741 mlir::ConversionPatternRewriter &rewriter) const override { 2742 mlir::Type idxTy = lowerTy().indexType(); 2743 mlir::Location loc = isPresent.getLoc(); 2744 auto ptr = adaptor.getOperands()[0]; 2745 2746 if (isPresent.val().getType().isa<fir::BoxCharType>()) { 2747 auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>(); 2748 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 2749 2750 mlir::Type ty = structTy.getBody()[0]; 2751 mlir::MLIRContext *ctx = isPresent.getContext(); 2752 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 2753 ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0); 2754 } 2755 mlir::LLVM::ConstantOp c0 = 2756 genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0); 2757 auto addr = rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr); 2758 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 2759 isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0); 2760 2761 return success(); 2762 } 2763 }; 2764 2765 /// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of 2766 /// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element 2767 /// in this struct is a pointer. Its type is determined from `KIND`. The 2nd 2768 /// element is the length of the character buffer (`#n`). 
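/// Roughly (illustrative):
///   %0 = fir.emboxchar %buffer, %len : (!fir.ref<!fir.char<1,?>>, i64) -> !fir.boxchar<1>
/// becomes an llvm.mlir.undef of the struct type followed by two
/// llvm.insertvalue operations that install the buffer pointer and the
/// (integer-cast) length.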
struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    MLIRContext *ctx = emboxChar.getContext();

    mlir::Value charBuffer = operands[0];
    mlir::Value charBufferLen = operands[1];

    mlir::Location loc = emboxChar.getLoc();
    mlir::Type llvmStructTy = convertType(emboxChar.getType());
    auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy);

    mlir::Type lenTy =
        llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1];
    mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen);

    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>(
        loc, llvmStructTy, llvmStruct, charBuffer, c0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
        emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1);

    return success();
  }
};
} // namespace

/// Construct an `llvm.extractvalue` instruction. It will return the value at
/// element \p x from \p tuple.
static mlir::LLVM::ExtractValueOp
genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty,
                         mlir::ConversionPatternRewriter &rewriter,
                         mlir::MLIRContext *ctx, int x) {
  auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x));
  auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x];
  return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx);
}

namespace {
/// Convert `fir.boxchar_len` to `llvm.extractvalue` for the 2nd part of the
/// boxchar.
struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value boxChar = adaptor.getOperands()[0];
    mlir::Location loc = boxChar.getLoc();
    mlir::MLIRContext *ctx = boxChar.getContext();
    mlir::Type returnValTy = boxCharLen.getResult().getType();

    constexpr int boxcharLenIdx = 1;
    mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex(
        loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx);
    mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len);
    rewriter.replaceOp(boxCharLen, lenAfterCast);

    return success();
  }
};

/// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for
/// the character buffer and one for the buffer length.
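/// A sketch of the result (names are illustrative only):
/// ```
///  %buf = llvm.extractvalue %boxchar[0] : !llvm.struct<(ptr<i8>, i64)>
///  %len = llvm.extractvalue %boxchar[1] : !llvm.struct<(ptr<i8>, i64)>
/// ```
/// The extracted length is additionally cast to the converted result type.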
struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    MLIRContext *ctx = unboxchar.getContext();

    mlir::Type lenTy = convertType(unboxchar.getType(1));
    mlir::Value tuple = adaptor.getOperands()[0];
    mlir::Type tupleTy = tuple.getType();

    mlir::Location loc = unboxchar.getLoc();
    mlir::Value ptrToBuffer =
        genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 0);

    mlir::LLVM::ExtractValueOp len =
        genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 1);
    mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len);

    rewriter.replaceOp(unboxchar,
                       ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast});
    return success();
  }
};

/// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its
/// components.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(unboxproc.getLoc(), "fir.unboxproc codegen");
    return failure();
  }
};

/// Convert `fir.field_index`. The conversion depends on whether the size of
/// the record is static or dynamic.
struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> {
  using FIROpConversion::FIROpConversion;

  // NB: most field references should be resolved by this point
  mlir::LogicalResult
  matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto recTy = field.on_type().cast<fir::RecordType>();
    unsigned index = recTy.getFieldIndex(field.field_id());

    if (!fir::hasDynamicSize(recTy)) {
      // Derived type has compile-time constant layout. Return index of the
      // component type in the parent type (to be used in GEP).
      rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset(
                                    field.getLoc(), rewriter, index)});
      return success();
    }

    // Derived type has dynamic size, so its layout is not a compile-time
    // constant. Call the compiler generated function to determine the byte
    // offset of the field at runtime. This returns a non-constant.
    FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get(
        field.getContext(), getOffsetMethodName(recTy, field.field_id()));
    NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr);
    NamedAttribute fieldAttr = rewriter.getNamedAttr(
        "field", mlir::IntegerAttr::get(lowerTy().indexType(), index));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        field, lowerTy().offsetType(), adaptor.getOperands(),
        llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr});
    return success();
  }

  // Reconstruct the name of the compiler-generated method that calculates the
  // field offset.
  inline static std::string getOffsetMethodName(fir::RecordType recTy,
                                                llvm::StringRef field) {
    return recTy.getName().str() + "P." + field.str() + ".offset";
  }
};

/// Convert a `fir.coordinate_of` into a (memory) reference to the designated
/// subobject.
/// The coordinate_of op is a Swiss army knife operation that can be used on
/// (memory) references to records, arrays, complex, etc. as well as boxes.
/// With unboxed arrays, there is the restriction that the array have a static
/// shape in all but the last column.
struct CoordinateOpConversion
    : public FIROpAndTypeConversion<fir::CoordinateOp> {
  using FIROpAndTypeConversion::FIROpAndTypeConversion;

  mlir::LogicalResult
  doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();

    mlir::Location loc = coor.getLoc();
    mlir::Value base = operands[0];
    mlir::Type baseObjectTy = coor.getBaseType();
    mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy);
    assert(objectTy && "fir.coordinate_of expects a reference type");

    // Complex type - basically, extract the real or imaginary part
    if (fir::isa_complex(objectTy)) {
      mlir::LLVM::ConstantOp c0 =
          genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
      SmallVector<mlir::Value> offs = {c0, operands[1]};
      mlir::Value gep = genGEP(loc, ty, rewriter, base, offs);
      rewriter.replaceOp(coor, gep);
      return success();
    }

    // Boxed type - get the base pointer from the box
    if (baseObjectTy.dyn_cast<fir::BoxType>())
      return doRewriteBox(coor, ty, operands, loc, rewriter);

    // Reference or pointer type
    if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType>())
      return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter);

    return rewriter.notifyMatchFailure(
        coor, "fir.coordinate_of base operand has unsupported type");
  }

  unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) const {
    return fir::hasDynamicSize(ty)
               ? op.getDefiningOp()
                     ->getAttrOfType<mlir::IntegerAttr>("field")
                     .getInt()
               : getIntValue(op);
  }

  int64_t getIntValue(mlir::Value val) const {
    assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value");
    mlir::Operation *defop = val.getDefiningOp();

    if (auto constOp = dyn_cast<mlir::arith::ConstantIntOp>(defop))
      return constOp.value();
    if (auto llConstOp = dyn_cast<mlir::LLVM::ConstantOp>(defop))
      if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>())
        return attr.getValue().getSExtValue();
    fir::emitFatalError(val.getLoc(), "must be a constant");
  }

  bool hasSubDimensions(mlir::Type type) const {
    return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>();
  }

  /// Check whether this form of `fir.coordinate_of` is supported. These
  /// additional checks are required, because we are not yet able to convert
  /// all valid forms of `fir.coordinate_of`.
  /// TODO: Either implement the unsupported cases or extend the verifier
  /// in FIROps.cpp instead.
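  /// Roughly: a single coordinate applied to a reference to a non-aggregate
  /// is accepted, as is a coordinate path in which every operand indexes into
  /// an array, record, or tuple; forms that mix the two are rejected here.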
  bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) const {
    const std::size_t numOfCoors = coors.size();
    std::size_t i = 0;
    bool subEle = false;
    bool ptrEle = false;
    for (; i < numOfCoors; ++i) {
      mlir::Value nxtOpnd = coors[i];
      if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
        subEle = true;
        i += arrTy.getDimension() - 1;
        type = arrTy.getEleTy();
      } else if (auto recTy = type.dyn_cast<fir::RecordType>()) {
        subEle = true;
        type = recTy.getType(getFieldNumber(recTy, nxtOpnd));
      } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) {
        subEle = true;
        type = tupTy.getType(getIntValue(nxtOpnd));
      } else {
        ptrEle = true;
      }
    }
    if (ptrEle)
      return (!subEle) && (numOfCoors == 1);
    return subEle && (i >= numOfCoors);
  }

  /// Walk the abstract memory layout and determine if the path traverses any
  /// array types with unknown shape. Return true iff all the array types have
  /// a constant shape along the path.
  bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) const {
    const std::size_t sz = coors.size();
    std::size_t i = 0;
    for (; i < sz; ++i) {
      mlir::Value nxtOpnd = coors[i];
      if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
        if (fir::sequenceWithNonConstantShape(arrTy))
          return false;
        i += arrTy.getDimension() - 1;
        type = arrTy.getEleTy();
      } else if (auto strTy = type.dyn_cast<fir::RecordType>()) {
        type = strTy.getType(getFieldNumber(strTy, nxtOpnd));
      } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) {
        type = strTy.getType(getIntValue(nxtOpnd));
      } else {
        return true;
      }
    }
    return true;
  }

private:
  mlir::LogicalResult
  doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands,
               mlir::Location loc,
               mlir::ConversionPatternRewriter &rewriter) const {
    mlir::Type boxObjTy = coor.getBaseType();
    assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`");

    mlir::Value boxBaseAddr = operands[0];

    // 1. SPECIAL CASE (uses `fir.len_param_index`):
    //   %box = ... : !fir.box<!fir.type<derived{len1:i32}>>
    //   %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}>
    //   %addr = coordinate_of %box, %lenp
    if (coor.getNumOperands() == 2) {
      mlir::Operation *coordinateDef = (*coor.coor().begin()).getDefiningOp();
      if (isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef)) {
        TODO(loc,
             "fir.coordinate_of - fir.len_param_index is not supported yet");
      }
    }

    // 2. GENERAL CASE:
    // 2.1. (`fir.array`)
    //   %box = ... : !fir.box<!fir.array<?xU>>
    //   %idx = ... : index
    //   %resultAddr = coordinate_of %box, %idx : !fir.ref<U>
    // 2.2 (`fir.derived`)
    //   %box = ... : !fir.box<!fir.type<derived_type{field_1:i32}>>
    //   %idx = ... : i32
    //   %resultAddr = coordinate_of %box, %idx : !fir.ref<i32>
    // 2.3 (`fir.derived` inside `fir.array`)
    //   %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32, field_2:f32}>>>
    //   %idx1 = ... : index
    //   %idx2 = ... : i32
    //   %resultAddr = coordinate_of %box, %idx1, %idx2 : !fir.ref<f32>
    // 2.4. TODO: Either document or disable any other case that the following
    //   implementation might convert.
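    // The loop below walks the remaining coordinates: for an array coordinate,
    // each index is scaled by the byte stride read from the box descriptor and
    // accumulated into a byte offset applied through an i8* GEP; for a
    // derived-type coordinate, the current address is cast to the record type
    // and a GEP on the field index is emitted.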
    mlir::LLVM::ConstantOp c0 =
        genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
    mlir::Value resultAddr =
        loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()),
                            boxBaseAddr, rewriter);
    auto currentObjTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy);
    mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext());

    for (unsigned i = 1, last = operands.size(); i < last; ++i) {
      if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) {
        if (i != 1)
          TODO(loc, "fir.array nested inside other array and/or derived type");
        // Apply the byte strides from the box. Ignore the lower bound from the
        // box since fir.coordinate_of indexes are zero based; lowering takes
        // care of lower-bound aspects. This accounts for both dynamically
        // sized types and non-contiguous arrays.
        auto idxTy = lowerTy().indexType();
        mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0);
        for (unsigned index = i, lastIndex = i + arrTy.getDimension();
             index < lastIndex; ++index) {
          mlir::Value stride =
              loadStrideFromBox(loc, operands[0], index - i, rewriter);
          auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy,
                                                       operands[index], stride);
          off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off);
        }
        auto voidPtrBase =
            rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr);
        SmallVector<mlir::Value> args{off};
        resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy,
                                                        voidPtrBase, args);
        i += arrTy.getDimension() - 1;
        currentObjTy = arrTy.getEleTy();
      } else if (auto recTy = currentObjTy.dyn_cast<fir::RecordType>()) {
        auto recRefTy =
            mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy));
        mlir::Value nxtOpnd = operands[i];
        auto memObj =
            rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr);
        llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd};
        currentObjTy = recTy.getType(getFieldNumber(recTy, nxtOpnd));
        auto llvmCurrentObjTy = lowerTy().convertType(currentObjTy);
        auto gep = rewriter.create<mlir::LLVM::GEPOp>(
            loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj,
            args);
        resultAddr =
            rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep);
      } else {
        fir::emitFatalError(loc, "unexpected type in coordinate_of");
      }
    }

    rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr);
    return success();
  }

  mlir::LogicalResult
  doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty,
                    mlir::ValueRange operands, mlir::Location loc,
                    mlir::ConversionPatternRewriter &rewriter) const {
    mlir::Type baseObjectTy = coor.getBaseType();

    mlir::Type currentObjTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy);
    bool hasSubdimension = hasSubDimensions(currentObjTy);
    bool columnIsDeferred = !hasSubdimension;

    if (!supportedCoordinate(currentObjTy, operands.drop_front(1))) {
      TODO(loc, "unsupported combination of coordinate operands");
    }

    const bool hasKnownShape =
        arraysHaveKnownShape(currentObjTy, operands.drop_front(1));

    // If only the column is `?`, then we can simply place the column value in
    // the 0-th GEP position.
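    // For example, with `!fir.array<2x3x?xi32>` only the last (column) extent
    // is deferred while the leading extents are constant, so the coordinate
    // can still be lowered to a plain GEP.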
    if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) {
      if (!hasKnownShape) {
        const unsigned sz = arrTy.getDimension();
        if (arraysHaveKnownShape(arrTy.getEleTy(),
                                 operands.drop_front(1 + sz))) {
          llvm::ArrayRef<int64_t> shape = arrTy.getShape();
          bool allConst = true;
          for (unsigned i = 0; i < sz - 1; ++i) {
            if (shape[i] < 0) {
              allConst = false;
              break;
            }
          }
          if (allConst)
            columnIsDeferred = true;
        }
      }
    }

    if (fir::hasDynamicSize(fir::unwrapSequenceType(currentObjTy))) {
      mlir::emitError(
          loc, "fir.coordinate_of with a dynamic element size is unsupported");
      return failure();
    }

    if (hasKnownShape || columnIsDeferred) {
      SmallVector<mlir::Value> offs;
      if (hasKnownShape && hasSubdimension) {
        mlir::LLVM::ConstantOp c0 =
            genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
        offs.push_back(c0);
      }
      const std::size_t sz = operands.size();
      Optional<int> dims;
      SmallVector<mlir::Value> arrIdx;
      for (std::size_t i = 1; i < sz; ++i) {
        mlir::Value nxtOpnd = operands[i];

        if (!currentObjTy) {
          mlir::emitError(loc, "invalid coordinate/check failed");
          return failure();
        }

        // check if the i-th coordinate relates to an array
        if (dims.hasValue()) {
          arrIdx.push_back(nxtOpnd);
          int dimsLeft = *dims;
          if (dimsLeft > 1) {
            dims = dimsLeft - 1;
            continue;
          }
          currentObjTy = currentObjTy.cast<fir::SequenceType>().getEleTy();
          // append array range in reverse (FIR arrays are column-major)
          offs.append(arrIdx.rbegin(), arrIdx.rend());
          arrIdx.clear();
          dims.reset();
          continue;
        }
        if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) {
          int d = arrTy.getDimension() - 1;
          if (d > 0) {
            dims = d;
            arrIdx.push_back(nxtOpnd);
            continue;
          }
          currentObjTy = currentObjTy.cast<fir::SequenceType>().getEleTy();
          offs.push_back(nxtOpnd);
          continue;
        }

        // check if the i-th coordinate relates to a field
        if (auto recTy = currentObjTy.dyn_cast<fir::RecordType>())
          currentObjTy = recTy.getType(getFieldNumber(recTy, nxtOpnd));
        else if (auto tupTy = currentObjTy.dyn_cast<mlir::TupleType>())
          currentObjTy = tupTy.getType(getIntValue(nxtOpnd));
        else
          currentObjTy = nullptr;

        offs.push_back(nxtOpnd);
      }
      if (dims.hasValue())
        offs.append(arrIdx.rbegin(), arrIdx.rend());
      mlir::Value base = operands[0];
      mlir::Value retval = genGEP(loc, ty, rewriter, base, offs);
      rewriter.replaceOp(coor, retval);
      return success();
    }

    mlir::emitError(loc, "fir.coordinate_of base operand has unsupported type");
    return failure();
  }
};

} // namespace

namespace {
/// Convert FIR dialect to LLVM dialect
///
/// This pass lowers all FIR dialect operations to LLVM IR dialect. An
/// MLIR pass is used to lower residual Std dialect to LLVM IR dialect.
///
/// This pass is not complete yet. We are upstreaming it in small patches.
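///
/// Assuming the usual flang tool and pass names, the conversion can be
/// exercised in isolation with, e.g., `fir-opt --fir-to-llvm-ir input.fir`;
/// in a normal compilation it is scheduled as part of the codegen pass
/// pipeline.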
class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> {
public:
  mlir::ModuleOp getModule() { return getOperation(); }

  void runOnOperation() override final {
    auto mod = getModule();
    if (!forcedTargetTriple.empty()) {
      fir::setTargetTriple(mod, forcedTargetTriple);
    }

    auto *context = getModule().getContext();
    fir::LLVMTypeConverter typeConverter{getModule()};
    mlir::OwningRewritePatternList pattern(context);
    pattern.insert<
        AbsentOpConversion, AddcOpConversion, AddrOfOpConversion,
        AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion,
        BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion,
        BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion,
        BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeDescOpConversion,
        CallOpConversion, CmpcOpConversion, ConstcOpConversion,
        ConvertOpConversion, CoordinateOpConversion, DispatchOpConversion,
        DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion,
        EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion,
        ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion,
        FreeMemOpConversion, HasValueOpConversion, GenTypeDescOpConversion,
        GlobalLenOpConversion, GlobalOpConversion, InsertOnRangeOpConversion,
        InsertValueOpConversion, IsPresentOpConversion,
        LenParamIndexOpConversion, LoadOpConversion, NegcOpConversion,
        NoReassocOpConversion, MulcOpConversion, SelectCaseOpConversion,
        SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion,
        ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion,
        SliceOpConversion, StoreOpConversion, StringLitOpConversion,
        SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion,
        UndefOpConversion, UnreachableOpConversion, XArrayCoorOpConversion,
        XEmboxOpConversion, XReboxOpConversion, ZeroOpConversion>(
        typeConverter);
    mlir::populateStdToLLVMConversionPatterns(typeConverter, pattern);
    mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter,
                                                            pattern);
    mlir::ConversionTarget target{*context};
    target.addLegalDialect<mlir::LLVM::LLVMDialect>();

    // required NOPs for applying a full conversion
    target.addLegalOp<mlir::ModuleOp>();

    // apply the patterns
    if (mlir::failed(mlir::applyFullConversion(getModule(), target,
                                               std::move(pattern)))) {
      signalPassFailure();
    }
  }
};
} // namespace

std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() {
  return std::make_unique<FIRToLLVMLowering>();
}