//===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
//
//===----------------------------------------------------------------------===//

#include "flang/Optimizer/CodeGen/CodeGen.h"
#include "CGOps.h"
#include "PassDetail.h"
#include "flang/ISO_Fortran_binding.h"
#include "flang/Optimizer/Dialect/FIRAttr.h"
#include "flang/Optimizer/Dialect/FIROps.h"
#include "flang/Optimizer/Support/TypeCode.h"
#include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Pass/Pass.h"
#include "llvm/ADT/ArrayRef.h"

#define DEBUG_TYPE "flang-codegen"

// fir::LLVMTypeConverter for converting to LLVM IR dialect types.
#include "TypeConverter.h"

// TODO: This should really be recovered from the specified target.
static constexpr unsigned defaultAlign = 8;

/// `fir.box` attribute values as defined for CFI_attribute_t in
/// flang/ISO_Fortran_binding.h.
static constexpr unsigned kAttrPointer = CFI_attribute_pointer;
static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable;

static inline mlir::Type getVoidPtrType(mlir::MLIRContext *context) {
  return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8));
}

static mlir::LLVM::ConstantOp
genConstantIndex(mlir::Location loc, mlir::Type ity,
                 mlir::ConversionPatternRewriter &rewriter,
                 std::int64_t offset) {
  auto cattr = rewriter.getI64IntegerAttr(offset);
  return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
}

static mlir::Block *createBlock(mlir::ConversionPatternRewriter &rewriter,
                                mlir::Block *insertBefore) {
  assert(insertBefore && "expected valid insertion block");
  return rewriter.createBlock(insertBefore->getParent(),
                              mlir::Region::iterator(insertBefore));
}

namespace {
/// FIR conversion pattern template
template <typename FromOp>
class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
public:
  explicit FIROpConversion(fir::LLVMTypeConverter &lowering)
      : mlir::ConvertOpToLLVMPattern<FromOp>(lowering) {}

protected:
  mlir::Type convertType(mlir::Type ty) const {
    return lowerTy().convertType(ty);
  }
  mlir::Type voidPtrTy() const { return getVoidPtrType(); }

  mlir::Type getVoidPtrType() const {
    return mlir::LLVM::LLVMPointerType::get(
        mlir::IntegerType::get(&lowerTy().getContext(), 8));
  }

  mlir::LLVM::ConstantOp
  genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
                 int value) const {
    mlir::Type i32Ty = rewriter.getI32Type();
    mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr);
  }

  mlir::LLVM::ConstantOp
  genConstantOffset(mlir::Location loc,
                    mlir::ConversionPatternRewriter &rewriter,
                    int offset) const {
    mlir::Type ity = lowerTy().offsetType();
    mlir::IntegerAttr cattr =
        rewriter.getI32IntegerAttr(offset);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
  }

  /// Construct code sequence to extract the specific value from a `fir.box`.
  mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box,
                              mlir::Type resultTy,
                              mlir::ConversionPatternRewriter &rewriter,
                              unsigned boxValue) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cValuePos =
        genConstantOffset(loc, rewriter, boxValue);
    auto pty = mlir::LLVM::LLVMPointerType::get(resultTy);
    auto p = rewriter.create<mlir::LLVM::GEPOp>(
        loc, pty, box, mlir::ValueRange{c0, cValuePos});
    return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p);
  }

  /// Method to construct code sequence to get the triple for dimension `dim`
  /// from a box.
  SmallVector<mlir::Value, 3>
  getDimsFromBox(mlir::Location loc, ArrayRef<mlir::Type> retTys,
                 mlir::Value box, mlir::Value dim,
                 mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cDims =
        genConstantOffset(loc, rewriter, kDimsPosInBox);
    mlir::LLVM::LoadOp l0 =
        loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter);
    mlir::LLVM::LoadOp l1 =
        loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter);
    mlir::LLVM::LoadOp l2 =
        loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter);
    return {l0.getResult(), l1.getResult(), l2.getResult()};
  }

  mlir::LLVM::LoadOp
  loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0,
                 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off,
                 mlir::Type ty,
                 mlir::ConversionPatternRewriter &rewriter) const {
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  mlir::Value
  loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim,
                    mlir::ConversionPatternRewriter &rewriter) const {
    auto idxTy = lowerTy().indexType();
    auto c0 = genConstantOffset(loc, rewriter, 0);
    auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox);
    auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim);
    return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy,
                          rewriter);
  }

  /// Read base address from a fir.box. Returned address has type ty.
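  /// A minimal sketch of the sequence emitted below (illustrative only; value
  /// names and types are made up): a GEP into the descriptor's base-address
  /// field at position `kAddrPosInBox`, followed by a load of that field:
  ///   %p    = llvm.getelementptr %box[%c0, %cAddr] : ... -> !llvm.ptr<ty>
  ///   %addr = llvm.load %p : !llvm.ptr<ty>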
  mlir::Value
  loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                      mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cAddr =
        genConstantOffset(loc, rewriter, kAddrPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  mlir::Value
  loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                         mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cElemLen =
        genConstantOffset(loc, rewriter, kElemLenPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  // Load the attribute from the \p box and perform a check against
  // \p maskValue. The final comparison is implemented as
  // `(attribute & maskValue) != 0`.
  mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box,
                                   mlir::ConversionPatternRewriter &rewriter,
                                   unsigned maskValue) const {
    mlir::Type attrTy = rewriter.getI32Type();
    mlir::Value attribute =
        getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox);
    mlir::LLVM::ConstantOp attrMask =
        genConstantOffset(loc, rewriter, maskValue);
    auto maskRes =
        rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask);
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    return rewriter.create<mlir::LLVM::ICmpOp>(
        loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0);
  }

  // Get the element type given an LLVM type that is of the form
  // [llvm.ptr](array|struct|vector)+ and the provided indexes.
  static mlir::Type getBoxEleTy(mlir::Type type,
                                llvm::ArrayRef<unsigned> indexes) {
    if (auto t = type.dyn_cast<mlir::LLVM::LLVMPointerType>())
      type = t.getElementType();
    for (auto i : indexes) {
      if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) {
        assert(!t.isOpaque() && i < t.getBody().size());
        type = t.getBody()[i];
      } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
        type = t.getElementType();
      } else if (auto t = type.dyn_cast<mlir::VectorType>()) {
        type = t.getElementType();
      } else {
        fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()),
                            "request for invalid box element type");
      }
    }
    return type;
  }

  // Return LLVM type of the base address given the LLVM type
  // of the related descriptor (lowered fir.box type).
  static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) {
    return getBoxEleTy(type, {kAddrPosInBox});
  }

  template <typename... ARGS>
  mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty,
                           mlir::ConversionPatternRewriter &rewriter,
                           mlir::Value base, ARGS... args) const {
    SmallVector<mlir::Value> cv{args...};
    return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv);
  }

  /// Perform an extension or truncation as needed on an integer value. Lowering
  /// to the specific target may involve some sign-extending or truncation of
  /// values, particularly to fit them from abstract box types to the
  /// appropriate reified structures.
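  /// For example, widening a 32-bit length value to a 64-bit index type goes
  /// through `llvm.sext`, narrowing goes through `llvm.trunc`, and equal
  /// widths return the value unchanged (widths here are illustrative).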
  mlir::Value integerCast(mlir::Location loc,
                          mlir::ConversionPatternRewriter &rewriter,
                          mlir::Type ty, mlir::Value val) const {
    auto valTy = val.getType();
    // If the value was not yet lowered, lower its type so that it can
    // be used in getPrimitiveTypeSizeInBits.
    if (!valTy.isa<mlir::IntegerType>())
      valTy = convertType(valTy);
    auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
    auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy);
    if (toSize < fromSize)
      return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
    if (toSize > fromSize)
      return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
    return val;
  }

  fir::LLVMTypeConverter &lowerTy() const {
    return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter());
  }
};

/// FIR conversion pattern template
template <typename FromOp>
class FIROpAndTypeConversion : public FIROpConversion<FromOp> {
public:
  using FIROpConversion<FromOp>::FIROpConversion;
  using OpAdaptor = typename FromOp::Adaptor;

  mlir::LogicalResult
  matchAndRewrite(FromOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const final {
    mlir::Type ty = this->convertType(op.getType());
    return doRewrite(op, ty, adaptor, rewriter);
  }

  virtual mlir::LogicalResult
  doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const = 0;
};

/// Create value signaling an absent optional argument in a call, e.g.
/// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>`
struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AbsentOp absent, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type ty = convertType(absent.getType());
    mlir::Location loc = absent.getLoc();

    if (absent.getType().isa<fir::BoxCharType>()) {
      auto structTy = ty.cast<mlir::LLVM::LLVMStructType>();
      assert(!structTy.isOpaque() && !structTy.getBody().empty());
      auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
      auto nullField =
          rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]);
      mlir::MLIRContext *ctx = absent.getContext();
      auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
      rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
          absent, ty, undefStruct, nullField, c0);
    } else {
      rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty);
    }
    return success();
  }
};

// Lower `fir.address_of` operation to `llvm.address_of` operation.
struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(addr.getType());
    rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>(
        addr, ty, addr.symbol().getRootReference().getValue());
    return success();
  }
};
} // namespace

/// Lookup the function to compute the memory size of this parametric derived
/// type. The size of the object may depend on the LEN type parameters of the
/// derived type.
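/// As an illustration (naming assumed from the suffix used below): for a
/// record type named `t`, the symbol looked up is `tP.mem.size`.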
static mlir::LLVM::LLVMFuncOp
getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op,
                          mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  std::string name = recTy.getName().str() + "P.mem.size";
  return module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name);
}

namespace {
/// convert to LLVM IR dialect `alloca`
struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    auto loc = alloc.getLoc();
    mlir::Type ity = lowerTy().indexType();
    unsigned i = 0;
    mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult();
    mlir::Type ty = convertType(alloc.getType());
    mlir::Type resultTy = ty;
    if (alloc.hasLenParams()) {
      unsigned end = alloc.numLenParams();
      llvm::SmallVector<mlir::Value> lenParams;
      for (; i < end; ++i)
        lenParams.push_back(operands[i]);
      mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType());
      if (auto chrTy = scalarType.dyn_cast<fir::CharacterType>()) {
        fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen(
            chrTy.getContext(), chrTy.getFKind());
        ty = mlir::LLVM::LLVMPointerType::get(convertType(rawCharTy));
        assert(end == 1);
        size = integerCast(loc, rewriter, ity, lenParams[0]);
      } else if (auto recTy = scalarType.dyn_cast<fir::RecordType>()) {
        mlir::LLVM::LLVMFuncOp memSizeFn =
            getDependentTypeMemSizeFn(recTy, alloc, rewriter);
        if (!memSizeFn)
          emitError(loc, "did not find allocation function");
        mlir::NamedAttribute attr = rewriter.getNamedAttr(
            "callee", mlir::SymbolRefAttr::get(memSizeFn));
        auto call = rewriter.create<mlir::LLVM::CallOp>(
            loc, ity, lenParams, llvm::ArrayRef<mlir::NamedAttribute>{attr});
        size = call.getResult(0);
        ty = mlir::LLVM::LLVMPointerType::get(
            mlir::IntegerType::get(alloc.getContext(), 8));
      } else {
        return emitError(loc, "unexpected type ")
               << scalarType << " with type parameters";
      }
    }
    if (alloc.hasShapeOperands()) {
      mlir::Type allocEleTy = fir::unwrapRefType(alloc.getType());
      // Scale the size by constant factors encoded in the array type.
      if (auto seqTy = allocEleTy.dyn_cast<fir::SequenceType>()) {
        fir::SequenceType::Extent constSize = 1;
        for (auto extent : seqTy.getShape())
          if (extent != fir::SequenceType::getUnknownExtent())
            constSize *= extent;
        mlir::Value constVal{
            genConstantIndex(loc, ity, rewriter, constSize).getResult()};
        size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, constVal);
      }
      unsigned end = operands.size();
      for (; i < end; ++i)
        size = rewriter.create<mlir::LLVM::MulOp>(
            loc, ity, size, integerCast(loc, rewriter, ity, operands[i]));
    }
    if (ty == resultTy) {
      // Do not emit the bitcast if ty and resultTy are the same.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AllocaOp>(alloc, ty, size,
                                                        alloc->getAttrs());
    } else {
      auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, ty, size,
                                                      alloc->getAttrs());
      rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(alloc, resultTy, al);
    }
    return success();
  }
};

/// Lower `fir.box_addr` to the sequence of operations to extract the first
/// element of the box.
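/// Sketch of the two cases handled below: a `!fir.box` operand becomes a GEP
/// plus load of the descriptor's base-address field, while any other boxed
/// value (e.g. a boxchar struct) becomes an `llvm.extractvalue` of element 0.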
struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxaddr.getLoc();
    mlir::Type ty = convertType(boxaddr.getType());
    if (auto argty = boxaddr.val().getType().dyn_cast<fir::BoxType>()) {
      rewriter.replaceOp(boxaddr, loadBaseAddrFromBox(loc, ty, a, rewriter));
    } else {
      auto c0attr = rewriter.getI32IntegerAttr(0);
      auto c0 = mlir::ArrayAttr::get(boxaddr.getContext(), c0attr);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, ty, a,
                                                              c0);
    }
    return success();
  }
};

/// Lower `fir.box_dims` to a sequence of operations to extract the requested
/// dimension information from the boxed value.
/// Results in a triple set of GEPs and loads.
struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    SmallVector<mlir::Type, 3> resultTypes = {
        convertType(boxdims.getResult(0).getType()),
        convertType(boxdims.getResult(1).getType()),
        convertType(boxdims.getResult(2).getType()),
    };
    auto results =
        getDimsFromBox(boxdims.getLoc(), resultTypes, adaptor.getOperands()[0],
                       adaptor.getOperands()[1], rewriter);
    rewriter.replaceOp(boxdims, results);
    return success();
  }
};

/// Lower `fir.box_elesize` to a sequence of operations to extract the size of
/// an element in the boxed value.
struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxelesz.getLoc();
    auto ty = convertType(boxelesz.getType());
    auto elemSize = getValueFromBox(loc, a, ty, rewriter, kElemLenPosInBox);
    rewriter.replaceOp(boxelesz, elemSize);
    return success();
  }
};

/// Lower `fir.box_isalloc` to a sequence of operations to determine if the
/// boxed value was from an ALLOCATABLE entity.
struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisalloc.getLoc();
    mlir::Value check =
        genBoxAttributeCheck(loc, box, rewriter, kAttrAllocatable);
    rewriter.replaceOp(boxisalloc, check);
    return success();
  }
};

/// Lower `fir.box_isarray` to a sequence of operations to determine if the
/// boxed value is an array.
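/// In short, the check generated below is `rank != 0`, where the rank is
/// loaded from the descriptor's rank field at position `kRankPosInBox`.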
struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxisarray.getLoc();
    auto rank =
        getValueFromBox(loc, a, rewriter.getI32Type(), rewriter, kRankPosInBox);
    auto c0 = genConstantOffset(loc, rewriter, 0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
        boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0);
    return success();
  }
};

/// Lower `fir.box_isptr` to a sequence of operations to determine if the
/// boxed value was from a POINTER entity.
struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisptr.getLoc();
    mlir::Value check = genBoxAttributeCheck(loc, box, rewriter, kAttrPointer);
    rewriter.replaceOp(boxisptr, check);
    return success();
  }
};

/// Lower `fir.box_rank` to the sequence of operations to extract the rank from
/// the box.
struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxrank.getLoc();
    mlir::Type ty = convertType(boxrank.getType());
    auto result = getValueFromBox(loc, a, ty, rewriter, kRankPosInBox);
    rewriter.replaceOp(boxrank, result);
    return success();
  }
};

/// Lower `fir.string_lit` to LLVM IR dialect operation.
struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(constop.getType());
    auto attr = constop.getValue();
    if (attr.isa<mlir::StringAttr>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr);
      return success();
    }

    auto arr = attr.cast<mlir::ArrayAttr>();
    auto charTy = constop.getType().cast<fir::CharacterType>();
    unsigned bits = lowerTy().characterBitsize(charTy);
    mlir::Type intTy = rewriter.getIntegerType(bits);
    auto attrs = llvm::map_range(
        arr.getValue(), [intTy, bits](mlir::Attribute attr) -> Attribute {
          return mlir::IntegerAttr::get(
              intTy,
              attr.cast<mlir::IntegerAttr>().getValue().sextOrTrunc(bits));
        });
    mlir::Type vecType = mlir::VectorType::get(arr.size(), intTy);
    auto denseAttr = mlir::DenseElementsAttr::get(
        vecType.cast<mlir::ShapedType>(), llvm::to_vector<8>(attrs));
    rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>(constop, ty,
                                                         denseAttr);
    return success();
  }
};

/// Lower `fir.boxproc_host` operation. Extracts the host pointer from the
/// boxproc.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(boxprochost.getLoc(), "fir.boxproc_host codegen");
    return failure();
  }
};

/// Lower `fir.box_tdesc` to the sequence of operations to extract the type
/// descriptor from the box.
struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxtypedesc.getLoc();
    mlir::Type typeTy =
        fir::getDescFieldTypeModel<kTypePosInBox>()(boxtypedesc.getContext());
    auto result = getValueFromBox(loc, box, typeTy, rewriter, kTypePosInBox);
    auto typePtrTy = mlir::LLVM::LLVMPointerType::get(typeTy);
    rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(boxtypedesc, typePtrTy,
                                                        result);
    return success();
  }
};

// `fir.call` -> `llvm.call`
struct CallOpConversion : public FIROpConversion<fir::CallOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CallOp call, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    SmallVector<mlir::Type> resultTys;
    for (auto r : call.getResults())
      resultTys.push_back(convertType(r.getType()));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        call, resultTys, adaptor.getOperands(), call->getAttrs());
    return success();
  }
};
} // namespace

static mlir::Type getComplexEleTy(mlir::Type complex) {
  if (auto cc = complex.dyn_cast<mlir::ComplexType>())
    return cc.getElementType();
  return complex.cast<fir::ComplexType>().getElementType();
}

namespace {
/// Compare complex values
///
/// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une).
///
/// For completeness, all other comparisons are done on the real component
/// only.
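///
/// Sketch of the lowering below: the real and imaginary parts are each
/// compared with `llvm.fcmp` (predicate taken from the original op); `.EQ.`
/// combines the two results with `llvm.and`, `.NE.` combines them with
/// `llvm.or`, and any other predicate keeps only the real-part comparison.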
struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    mlir::MLIRContext *ctxt = cmp.getContext();
    mlir::Type eleTy = convertType(getComplexEleTy(cmp.lhs().getType()));
    mlir::Type resTy = convertType(cmp.getType());
    mlir::Location loc = cmp.getLoc();
    auto pos0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
    SmallVector<mlir::Value, 2> rp{rewriter.create<mlir::LLVM::ExtractValueOp>(
                                       loc, eleTy, operands[0], pos0),
                                   rewriter.create<mlir::LLVM::ExtractValueOp>(
                                       loc, eleTy, operands[1], pos0)};
    auto rcp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, rp, cmp->getAttrs());
    auto pos1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
    SmallVector<mlir::Value, 2> ip{rewriter.create<mlir::LLVM::ExtractValueOp>(
                                       loc, eleTy, operands[0], pos1),
                                   rewriter.create<mlir::LLVM::ExtractValueOp>(
                                       loc, eleTy, operands[1], pos1)};
    auto icp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, ip, cmp->getAttrs());
    SmallVector<mlir::Value, 2> cp{rcp, icp};
    switch (cmp.getPredicate()) {
    case mlir::arith::CmpFPredicate::OEQ: // .EQ.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp);
      break;
    case mlir::arith::CmpFPredicate::UNE: // .NE.
      rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp);
      break;
    default:
      rewriter.replaceOp(cmp, rcp.getResult());
      break;
    }
    return success();
  }
};

/// Lower complex constants
struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::ConstcOp conc, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Location loc = conc.getLoc();
    mlir::MLIRContext *ctx = conc.getContext();
    mlir::Type ty = convertType(conc.getType());
    mlir::Type ety = convertType(getComplexEleTy(conc.getType()));
    auto realFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getReal()));
    auto realPart =
        rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, realFloatAttr);
    auto imFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getImaginary()));
    auto imPart =
        rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, imFloatAttr);
    auto realIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto imIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    auto undef = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto setReal = rewriter.create<mlir::LLVM::InsertValueOp>(
        loc, ty, undef, realPart, realIndex);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(conc, ty, setReal,
                                                           imPart, imIndex);
    return success();
  }

  inline APFloat getValue(mlir::Attribute attr) const {
    return attr.cast<fir::RealAttr>().getValue();
  }
};

/// convert value of from-type to value of to-type
struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> {
  using FIROpConversion::FIROpConversion;

  static bool isFloatingPointTy(mlir::Type ty) {
    return ty.isa<mlir::FloatType>();
  }

  mlir::LogicalResult
  matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto fromTy = convertType(convert.value().getType());
    auto toTy = convertType(convert.res().getType());
    mlir::Value op0 = adaptor.getOperands()[0];
    if (fromTy == toTy) {
      rewriter.replaceOp(convert, op0);
      return success();
    }
    auto loc = convert.getLoc();
    auto convertFpToFp = [&](mlir::Value val, unsigned fromBits,
                             unsigned toBits, mlir::Type toTy) -> mlir::Value {
      if (fromBits == toBits) {
        // TODO: Converting between two floating-point representations with the
        // same bitwidth is not allowed for now.
        mlir::emitError(loc,
                        "cannot implicitly convert between two floating-point "
                        "representations of the same bitwidth");
        return {};
      }
      if (fromBits > toBits)
        return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val);
      return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val);
    };
    // Complex to complex conversion.
    if (fir::isa_complex(convert.value().getType()) &&
        fir::isa_complex(convert.res().getType())) {
      // Special case: handle the conversion of a complex such that both the
      // real and imaginary parts are converted together.
      auto zero = mlir::ArrayAttr::get(convert.getContext(),
                                       rewriter.getI32IntegerAttr(0));
      auto one = mlir::ArrayAttr::get(convert.getContext(),
                                      rewriter.getI32IntegerAttr(1));
      auto ty = convertType(getComplexEleTy(convert.value().getType()));
      auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, zero);
      auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, one);
      auto nt = convertType(getComplexEleTy(convert.res().getType()));
      auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
      auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt);
      auto rc = convertFpToFp(rp, fromBits, toBits, nt);
      auto ic = convertFpToFp(ip, fromBits, toBits, nt);
      auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy);
      auto i1 =
          rewriter.create<mlir::LLVM::InsertValueOp>(loc, toTy, un, rc, zero);
      rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, toTy, i1,
                                                             ic, one);
      return mlir::success();
    }
    // Floating point to floating point conversion.
    if (isFloatingPointTy(fromTy)) {
      if (isFloatingPointTy(toTy)) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        auto v = convertFpToFp(op0, fromBits, toBits, toTy);
        rewriter.replaceOp(convert, v);
        return mlir::success();
      }
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::FPToSIOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::IntegerType>()) {
      // Integer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        assert(fromBits != toBits);
        if (fromBits > toBits) {
          rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0);
          return mlir::success();
        }
        rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to floating point conversion.
      if (isFloatingPointTy(toTy)) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::LLVM::LLVMPointerType>()) {
      // Pointer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Pointer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0);
        return mlir::success();
      }
    }
    return emitError(loc) << "cannot convert " << fromTy << " to " << toTy;
  }
};

/// Lower `fir.dispatch` operation. A virtual call to a method in a dispatch
/// table.
struct DispatchOpConversion : public FIROpConversion<fir::DispatchOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DispatchOp dispatch, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dispatch.getLoc(), "fir.dispatch codegen");
    return failure();
  }
};

/// Lower `fir.dispatch_table` operation. The dispatch table for a Fortran
/// derived type.
struct DispatchTableOpConversion
    : public FIROpConversion<fir::DispatchTableOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DispatchTableOp dispTab, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dispTab.getLoc(), "fir.dispatch_table codegen");
    return failure();
  }
};

/// Lower `fir.dt_entry` operation. An entry in a dispatch table; binds a
/// method-name to a function.
struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DTEntryOp dtEnt, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dtEnt.getLoc(), "fir.dt_entry codegen");
    return failure();
  }
};

/// Lower `fir.global_len` operation.
struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(globalLen.getLoc(), "fir.global_len codegen");
    return failure();
  }
};

/// Lower fir.len_param_index
struct LenParamIndexOpConversion
    : public FIROpConversion<fir::LenParamIndexOp> {
  using FIROpConversion::FIROpConversion;

  // FIXME: this should be specialized by the runtime target
  mlir::LogicalResult
  matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(lenp.getLoc(), "fir.len_param_index codegen");
  }
};

/// Lower `fir.gentypedesc` to a global constant.
struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen");
    return failure();
  }
};
} // namespace

/// Return the LLVMFuncOp corresponding to the standard malloc call.
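/// If the module does not already declare `malloc`, a declaration is inserted
/// with (roughly) the signature `!llvm.ptr<i8> (i64)`; the 64-bit size
/// argument width is hard-coded below.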
static mlir::LLVM::LLVMFuncOp
getMalloc(fir::AllocMemOp op, mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  if (mlir::LLVM::LLVMFuncOp mallocFunc =
          module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("malloc"))
    return mallocFunc;
  mlir::OpBuilder moduleBuilder(
      op->getParentOfType<mlir::ModuleOp>().getBodyRegion());
  auto indexType = mlir::IntegerType::get(op.getContext(), 64);
  return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
      rewriter.getUnknownLoc(), "malloc",
      mlir::LLVM::LLVMFunctionType::get(getVoidPtrType(op.getContext()),
                                        indexType,
                                        /*isVarArg=*/false));
}

/// Helper function for generating the LLVM IR that computes the size
/// in bytes for a derived type.
static mlir::Value
computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy,
                       mlir::ConversionPatternRewriter &rewriter) {
  auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
  mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
  llvm::SmallVector<mlir::Value> args{one};
  auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args);
  return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep);
}

namespace {
/// Lower a `fir.allocmem` instruction into `llvm.call @malloc`
struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type ty = convertType(heap.getType());
    mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter);
    mlir::Location loc = heap.getLoc();
    auto ity = lowerTy().indexType();
    if (auto recTy = fir::unwrapSequenceType(heap.getAllocatedType())
                         .dyn_cast<fir::RecordType>())
      if (recTy.getNumLenParams() != 0) {
        TODO(loc,
             "fir.allocmem codegen of derived type with length parameters");
        return failure();
      }
    mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty);
    for (mlir::Value opnd : adaptor.getOperands())
      size = rewriter.create<mlir::LLVM::MulOp>(
          loc, ity, size, integerCast(loc, rewriter, ity, opnd));
    heap->setAttr("callee", mlir::SymbolRefAttr::get(mallocFunc));
    auto malloc = rewriter.create<mlir::LLVM::CallOp>(
        loc, ::getVoidPtrType(heap.getContext()), size, heap->getAttrs());
    rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(heap, ty,
                                                       malloc.getResult(0));
    return success();
  }

  // Compute the (allocation) size of the allocmem type in bytes.
  mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy,
                                 mlir::ConversionPatternRewriter &rewriter,
                                 mlir::Type llTy) const {
    // Use the primitive size, if available.
    auto ptrTy = llTy.dyn_cast<mlir::LLVM::LLVMPointerType>();
    if (auto size =
            mlir::LLVM::getPrimitiveTypeSizeInBits(ptrTy.getElementType()))
      return genConstantIndex(loc, idxTy, rewriter, size / 8);

    // Otherwise, generate the GEP trick in LLVM IR to compute the size.
    return computeDerivedTypeSize(loc, ptrTy, idxTy, rewriter);
  }
};
} // namespace

/// Return the LLVMFuncOp corresponding to the standard free call.
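/// As with `malloc` above, a `free` declaration is inserted on demand, with
/// the (assumed) signature `void (!llvm.ptr<i8>)`.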
static mlir::LLVM::LLVMFuncOp
getFree(fir::FreeMemOp op, mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  if (mlir::LLVM::LLVMFuncOp freeFunc =
          module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("free"))
    return freeFunc;
  mlir::OpBuilder moduleBuilder(module.getBodyRegion());
  auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext());
  return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
      rewriter.getUnknownLoc(), "free",
      mlir::LLVM::LLVMFunctionType::get(voidType,
                                        getVoidPtrType(op.getContext()),
                                        /*isVarArg=*/false));
}

namespace {
/// Lower a `fir.freemem` instruction into `llvm.call @free`
struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::LLVM::LLVMFuncOp freeFunc = getFree(freemem, rewriter);
    mlir::Location loc = freemem.getLoc();
    auto bitcast = rewriter.create<mlir::LLVM::BitcastOp>(
        freemem.getLoc(), voidPtrTy(), adaptor.getOperands()[0]);
    freemem->setAttr("callee", mlir::SymbolRefAttr::get(freeFunc));
    rewriter.create<mlir::LLVM::CallOp>(
        loc, mlir::TypeRange{}, mlir::ValueRange{bitcast}, freemem->getAttrs());
    rewriter.eraseOp(freemem);
    return success();
  }
};

/// Convert `fir.end`
struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(firEnd.getLoc(), "fir.end codegen");
    return failure();
  }
};

/// Lower `fir.has_value` operation to `llvm.return` operation.
struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<LLVM::ReturnOp>(op, adaptor.getOperands());
    return success();
  }
};

/// Lower `fir.global` operation to `llvm.global` operation.
/// `fir.insert_on_range` operations are replaced with a constant dense
/// attribute if they are applied on the full range.
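/// Illustrative example (hypothetical initializer): a single
/// `fir.insert_on_range` writing one constant across indices 0..9 of a
/// 10-element array is collapsed into one dense `arith.constant` splat,
/// because the covered range equals the full extent.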
struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto tyAttr = convertType(global.getType());
    if (global.getType().isa<fir::BoxType>())
      tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType();
    auto loc = global.getLoc();
    mlir::Attribute initAttr{};
    if (global.initVal())
      initAttr = global.initVal().getValue();
    auto linkage = convertLinkage(global.linkName());
    auto isConst = global.constant().hasValue();
    auto g = rewriter.create<mlir::LLVM::GlobalOp>(
        loc, tyAttr, isConst, linkage, global.getSymName(), initAttr);
    auto &gr = g.getInitializerRegion();
    rewriter.inlineRegionBefore(global.region(), gr, gr.end());
    if (!gr.empty()) {
      // Replace insert_on_range with a constant dense attribute if the
      // initialization is on the full range.
      auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>();
      for (auto insertOp : insertOnRangeOps) {
        if (isFullRange(insertOp.coor(), insertOp.getType())) {
          auto seqTyAttr = convertType(insertOp.getType());
          auto *op = insertOp.val().getDefiningOp();
          auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op);
          if (!constant) {
            auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op);
            if (!convertOp)
              continue;
            constant = cast<mlir::arith::ConstantOp>(
                convertOp.value().getDefiningOp());
          }
          mlir::Type vecType = mlir::VectorType::get(
              insertOp.getType().getShape(), constant.getType());
          auto denseAttr = mlir::DenseElementsAttr::get(
              vecType.cast<ShapedType>(), constant.getValue());
          rewriter.setInsertionPointAfter(insertOp);
          rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>(
              insertOp, seqTyAttr, denseAttr);
        }
      }
    }
    rewriter.eraseOp(global);
    return success();
  }

  bool isFullRange(mlir::DenseIntElementsAttr indexes,
                   fir::SequenceType seqTy) const {
    auto extents = seqTy.getShape();
    if (indexes.size() / 2 != static_cast<int64_t>(extents.size()))
      return false;
    auto cur_index = indexes.value_begin<int64_t>();
    for (unsigned i = 0; i < indexes.size(); i += 2) {
      if (*(cur_index++) != 0)
        return false;
      if (*(cur_index++) != extents[i / 2] - 1)
        return false;
    }
    return true;
  }

  // TODO: String comparison should be avoided. Replace linkName with an
  // enumeration.
  mlir::LLVM::Linkage convertLinkage(Optional<StringRef> optLinkage) const {
    if (optLinkage.hasValue()) {
      auto name = optLinkage.getValue();
      if (name == "internal")
        return mlir::LLVM::Linkage::Internal;
      if (name == "linkonce")
        return mlir::LLVM::Linkage::Linkonce;
      if (name == "common")
        return mlir::LLVM::Linkage::Common;
      if (name == "weak")
        return mlir::LLVM::Linkage::Weak;
    }
    return mlir::LLVM::Linkage::External;
  }
};
} // namespace

static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest,
                        Optional<mlir::ValueRange> destOps,
                        mlir::ConversionPatternRewriter &rewriter,
                        mlir::Block *newBlock) {
  if (destOps.hasValue())
    rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(),
                                          newBlock, mlir::ValueRange());
  else
    rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock);
}

template <typename A, typename B>
static void genBrOp(A caseOp, mlir::Block *dest, Optional<B> destOps,
                    mlir::ConversionPatternRewriter &rewriter) {
  if (destOps.hasValue())
    rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(),
                                                  dest);
  else
    rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest);
}

static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp,
                              mlir::Block *dest,
                              Optional<mlir::ValueRange> destOps,
                              mlir::ConversionPatternRewriter &rewriter) {
  auto *thisBlock = rewriter.getInsertionBlock();
  auto *newBlock = createBlock(rewriter, dest);
  rewriter.setInsertionPointToEnd(thisBlock);
  genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock);
  rewriter.setInsertionPointToEnd(newBlock);
}

namespace {
/// Conversion of `fir.select_case`
///
/// The `fir.select_case` operation is converted to an if-then-else ladder.
/// Depending on the case condition type, one or more comparisons and
/// conditional branches may be generated.
///
/// A point value case such as `case(4)`, a lower bound case such as
/// `case(5:)`, or an upper bound case such as `case(:3)` is converted to a
/// simple comparison between the selector value and the constant value in the
/// case. The block associated with the case condition is then executed if
/// the comparison succeeds; otherwise it branches to the next block with the
/// comparison for the next case condition.
///
/// A closed interval case condition such as `case(7:10)` is converted with a
/// first comparison and conditional branch for the lower bound. If
/// successful, it branches to a second block with the comparison for the
/// upper bound in the same case condition.
///
/// TODO: lowering of CHARACTER type cases is not handled yet.
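///
/// As an illustrative sketch only (pseudo-IR, block and value names invented),
/// the ladder for `case(7:10)` has the shape:
///   %lo = llvm.icmp "sle" %c7, %sel ...
///   llvm.cond_br %lo, ^upperCheck, ^nextCase
/// ^upperCheck:
///   %hi = llvm.icmp "sle" %sel, %c10 ...
///   llvm.cond_br %hi, ^caseBlock, ^nextCase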
struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    unsigned conds = caseOp.getNumConditions();
    llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue();
    // Type can be CHARACTER, INTEGER, or LOGICAL (C1145)
    auto ty = caseOp.getSelector().getType();
    if (ty.isa<fir::CharacterType>()) {
      TODO(caseOp.getLoc(), "fir.select_case codegen with character type");
      return failure();
    }
    mlir::Value selector = caseOp.getSelector(adaptor.getOperands());
    auto loc = caseOp.getLoc();
    for (unsigned t = 0; t != conds; ++t) {
      mlir::Block *dest = caseOp.getSuccessor(t);
      llvm::Optional<mlir::ValueRange> destOps =
          caseOp.getSuccessorOperands(adaptor.getOperands(), t);
      llvm::Optional<mlir::ValueRange> cmpOps =
          *caseOp.getCompareOperands(adaptor.getOperands(), t);
      mlir::Value caseArg = *(cmpOps.getValue().begin());
      mlir::Attribute attr = cases[t];
      if (attr.isa<fir::PointIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::LowerBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::UpperBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::ClosedIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        auto *thisBlock = rewriter.getInsertionBlock();
        auto *newBlock1 = createBlock(rewriter, dest);
        auto *newBlock2 = createBlock(rewriter, dest);
        rewriter.setInsertionPointToEnd(thisBlock);
        rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock1);
        mlir::Value caseArg0 = *(cmpOps.getValue().begin() + 1);
        auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0);
        genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock2);
        continue;
      }
      assert(attr.isa<mlir::UnitAttr>());
      assert((t + 1 == conds) && "unit must be last");
      genBrOp(caseOp, dest, destOps, rewriter);
    }
    return success();
  }
};
} // namespace

template <typename OP>
static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select,
                                  typename OP::Adaptor adaptor,
                                  mlir::ConversionPatternRewriter &rewriter) {
  unsigned conds = select.getNumConditions();
  auto cases = select.getCases().getValue();
  mlir::Value selector = adaptor.selector();
  auto loc = select.getLoc();
  assert(conds > 0 && "select must have cases");

  llvm::SmallVector<mlir::Block *> destinations;
  llvm::SmallVector<mlir::ValueRange> destinationsOperands;
  mlir::Block *defaultDestination;
  mlir::ValueRange defaultOperands;
  llvm::SmallVector<int32_t> caseValues;

  for (unsigned t = 0; t != conds; ++t) {
    mlir::Block *dest = select.getSuccessor(t);
    auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t);
    const mlir::Attribute &attr = cases[t];
    if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) {
      destinations.push_back(dest);
      destinationsOperands.push_back(destOps.hasValue() ? *destOps
                                                        : ValueRange());
      caseValues.push_back(intAttr.getInt());
      continue;
    }
    assert(attr.template dyn_cast_or_null<mlir::UnitAttr>());
    assert((t + 1 == conds) && "unit must be last");
    defaultDestination = dest;
    defaultOperands = destOps.hasValue() ? *destOps : ValueRange();
  }

  // LLVM::SwitchOp takes an i32 type for the selector.
  if (select.getSelector().getType() != rewriter.getI32Type())
    selector =
        rewriter.create<LLVM::TruncOp>(loc, rewriter.getI32Type(), selector);

  rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>(
      select, selector,
      /*defaultDestination=*/defaultDestination,
      /*defaultOperands=*/defaultOperands,
      /*caseValues=*/caseValues,
      /*caseDestinations=*/destinations,
      /*caseOperands=*/destinationsOperands,
      /*branchWeights=*/ArrayRef<int32_t>());
}

namespace {
/// conversion of fir::SelectOp to an llvm.switch operation
struct SelectOpConversion : public FIROpConversion<fir::SelectOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter);
    return success();
  }
};

/// `fir.load` --> `llvm.load`
struct LoadOpConversion : public FIROpConversion<fir::LoadOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // fir.box is a special case because it is considered an SSA value in
    // FIR, but it is lowered as a pointer to a descriptor. So fir.ref<fir.box>
    // and fir.box end up being the same LLVM type, and loading a
    // fir.ref<fir.box> is actually a no-op in LLVM.
    if (load.getType().isa<fir::BoxType>()) {
      rewriter.replaceOp(load, adaptor.getOperands()[0]);
    } else {
      mlir::Type ty = convertType(load.getType());
      ArrayRef<NamedAttribute> at = load->getAttrs();
      rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>(
          load, ty, adaptor.getOperands(), at);
    }
    return success();
  }
};

/// Lower `fir.no_reassoc` to LLVM IR dialect.
/// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast
/// math flags?
struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]);
    return success();
  }
};

/// Lower `fir.select_type` to LLVM IR dialect.
struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::emitError(select.getLoc(),
                    "fir.select_type should have already been converted");
    return failure();
  }
};

/// conversion of fir::SelectRankOp to an llvm.switch operation
struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter);
    return success();
  }
};

/// `fir.store` --> `llvm.store`
struct StoreOpConversion : public FIROpConversion<fir::StoreOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    if (store.value().getType().isa<fir::BoxType>()) {
      // A fir.box value is actually in memory; load it first before storing it.
      mlir::Location loc = store.getLoc();
      mlir::Type boxPtrTy = adaptor.getOperands()[0].getType();
      auto val = rewriter.create<mlir::LLVM::LoadOp>(
          loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(),
          adaptor.getOperands()[0]);
      rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>(
          store, val, adaptor.getOperands()[1]);
    } else {
      rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>(
          store, adaptor.getOperands()[0], adaptor.getOperands()[1]);
    }
    return success();
  }
};

/// convert to LLVM IR dialect `undef`
struct UndefOpConversion : public FIROpConversion<fir::UndefOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UndefOp undef, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>(
        undef, convertType(undef.getType()));
    return success();
  }
};

/// `fir.unreachable` --> `llvm.unreachable`
struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach);
    return success();
  }
};

struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::ZeroOp zero, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type ty = convertType(zero.getType());
    if (ty.isa<mlir::LLVM::LLVMPointerType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty);
    } else if (ty.isa<mlir::IntegerType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
          zero, ty, mlir::IntegerAttr::get(zero.getType(), 0));
    } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
          zero, ty, mlir::FloatAttr::get(zero.getType(),
0.0)); 1414 } else { 1415 // TODO: create ConstantAggregateZero for FIR aggregate/array types. 1416 return rewriter.notifyMatchFailure( 1417 zero, 1418 "conversion of fir.zero with aggregate type not implemented yet"); 1419 } 1420 return success(); 1421 } 1422 }; 1423 } // namespace 1424 1425 /// Common base class for embox to descriptor conversion. 1426 template <typename OP> 1427 struct EmboxCommonConversion : public FIROpConversion<OP> { 1428 using FIROpConversion<OP>::FIROpConversion; 1429 1430 // Find the LLVMFuncOp in whose entry block the alloca should be inserted. 1431 // The order to find the LLVMFuncOp is as follows: 1432 // 1. The parent operation of the current block if it is a LLVMFuncOp. 1433 // 2. The first ancestor that is a LLVMFuncOp. 1434 mlir::LLVM::LLVMFuncOp 1435 getFuncForAllocaInsert(mlir::ConversionPatternRewriter &rewriter) const { 1436 mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp(); 1437 return mlir::isa<mlir::LLVM::LLVMFuncOp>(parentOp) 1438 ? mlir::cast<mlir::LLVM::LLVMFuncOp>(parentOp) 1439 : parentOp->getParentOfType<mlir::LLVM::LLVMFuncOp>(); 1440 } 1441 1442 // Generate an alloca of size 1 and type \p toTy. 1443 mlir::LLVM::AllocaOp 1444 genAllocaWithType(mlir::Location loc, mlir::Type toTy, unsigned alignment, 1445 mlir::ConversionPatternRewriter &rewriter) const { 1446 auto thisPt = rewriter.saveInsertionPoint(); 1447 mlir::LLVM::LLVMFuncOp func = getFuncForAllocaInsert(rewriter); 1448 rewriter.setInsertionPointToStart(&func.front()); 1449 auto size = this->genI32Constant(loc, rewriter, 1); 1450 auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, toTy, size, alignment); 1451 rewriter.restoreInsertionPoint(thisPt); 1452 return al; 1453 } 1454 1455 static int getCFIAttr(fir::BoxType boxTy) { 1456 auto eleTy = boxTy.getEleTy(); 1457 if (eleTy.isa<fir::PointerType>()) 1458 return CFI_attribute_pointer; 1459 if (eleTy.isa<fir::HeapType>()) 1460 return CFI_attribute_allocatable; 1461 return CFI_attribute_other; 1462 } 1463 1464 static fir::RecordType unwrapIfDerived(fir::BoxType boxTy) { 1465 return fir::unwrapSequenceType(fir::dyn_cast_ptrOrBoxEleTy(boxTy)) 1466 .template dyn_cast<fir::RecordType>(); 1467 } 1468 static bool isDerivedTypeWithLenParams(fir::BoxType boxTy) { 1469 auto recTy = unwrapIfDerived(boxTy); 1470 return recTy && recTy.getNumLenParams() > 0; 1471 } 1472 static bool isDerivedType(fir::BoxType boxTy) { 1473 return unwrapIfDerived(boxTy) != nullptr; 1474 } 1475 1476 // Get the element size and CFI type code of the boxed value. 
1477 std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode( 1478 mlir::Location loc, mlir::ConversionPatternRewriter &rewriter, 1479 mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const { 1480 auto doInteger = 1481 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1482 int typeCode = fir::integerBitsToTypeCode(width); 1483 return {this->genConstantOffset(loc, rewriter, width / 8), 1484 this->genConstantOffset(loc, rewriter, typeCode)}; 1485 }; 1486 auto doLogical = 1487 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1488 int typeCode = fir::logicalBitsToTypeCode(width); 1489 return {this->genConstantOffset(loc, rewriter, width / 8), 1490 this->genConstantOffset(loc, rewriter, typeCode)}; 1491 }; 1492 auto doFloat = [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1493 int typeCode = fir::realBitsToTypeCode(width); 1494 return {this->genConstantOffset(loc, rewriter, width / 8), 1495 this->genConstantOffset(loc, rewriter, typeCode)}; 1496 }; 1497 auto doComplex = 1498 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1499 auto typeCode = fir::complexBitsToTypeCode(width); 1500 return {this->genConstantOffset(loc, rewriter, width / 8 * 2), 1501 this->genConstantOffset(loc, rewriter, typeCode)}; 1502 }; 1503 auto doCharacter = 1504 [&](unsigned width, 1505 mlir::Value len) -> std::tuple<mlir::Value, mlir::Value> { 1506 auto typeCode = fir::characterBitsToTypeCode(width); 1507 auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode); 1508 if (width == 8) 1509 return {len, typeCodeVal}; 1510 auto byteWidth = this->genConstantOffset(loc, rewriter, width / 8); 1511 auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64); 1512 auto size = 1513 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len); 1514 return {size, typeCodeVal}; 1515 }; 1516 auto getKindMap = [&]() -> fir::KindMapping & { 1517 return this->lowerTy().getKindMap(); 1518 }; 1519 // Pointer-like types. 1520 if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy)) 1521 boxEleTy = eleTy; 1522 // Integer types. 1523 if (fir::isa_integer(boxEleTy)) { 1524 if (auto ty = boxEleTy.dyn_cast<mlir::IntegerType>()) 1525 return doInteger(ty.getWidth()); 1526 auto ty = boxEleTy.cast<fir::IntegerType>(); 1527 return doInteger(getKindMap().getIntegerBitsize(ty.getFKind())); 1528 } 1529 // Floating point types. 1530 if (fir::isa_real(boxEleTy)) { 1531 if (auto ty = boxEleTy.dyn_cast<mlir::FloatType>()) 1532 return doFloat(ty.getWidth()); 1533 auto ty = boxEleTy.cast<fir::RealType>(); 1534 return doFloat(getKindMap().getRealBitsize(ty.getFKind())); 1535 } 1536 // Complex types. 1537 if (fir::isa_complex(boxEleTy)) { 1538 if (auto ty = boxEleTy.dyn_cast<mlir::ComplexType>()) 1539 return doComplex( 1540 ty.getElementType().cast<mlir::FloatType>().getWidth()); 1541 auto ty = boxEleTy.cast<fir::ComplexType>(); 1542 return doComplex(getKindMap().getRealBitsize(ty.getFKind())); 1543 } 1544 // Character types. 1545 if (auto ty = boxEleTy.dyn_cast<fir::CharacterType>()) { 1546 auto charWidth = getKindMap().getCharacterBitsize(ty.getFKind()); 1547 if (ty.getLen() != fir::CharacterType::unknownLen()) { 1548 auto len = this->genConstantOffset(loc, rewriter, ty.getLen()); 1549 return doCharacter(charWidth, len); 1550 } 1551 assert(!lenParams.empty()); 1552 return doCharacter(charWidth, lenParams.back()); 1553 } 1554 // Logical type. 
1555 if (auto ty = boxEleTy.dyn_cast<fir::LogicalType>()) 1556 return doLogical(getKindMap().getLogicalBitsize(ty.getFKind())); 1557 // Array types. 1558 if (auto seqTy = boxEleTy.dyn_cast<fir::SequenceType>()) 1559 return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams); 1560 // Derived-type types. 1561 if (boxEleTy.isa<fir::RecordType>()) { 1562 auto ptrTy = mlir::LLVM::LLVMPointerType::get( 1563 this->lowerTy().convertType(boxEleTy)); 1564 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 1565 auto one = 1566 genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1); 1567 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, 1568 mlir::ValueRange{one}); 1569 auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>( 1570 loc, this->lowerTy().indexType(), gep); 1571 return {eleSize, 1572 this->genConstantOffset(loc, rewriter, fir::derivedToTypeCode())}; 1573 } 1574 // Reference type. 1575 if (fir::isa_ref_type(boxEleTy)) { 1576 // FIXME: use the target pointer size rather than sizeof(void*) 1577 return {this->genConstantOffset(loc, rewriter, sizeof(void *)), 1578 this->genConstantOffset(loc, rewriter, CFI_type_cptr)}; 1579 } 1580 fir::emitFatalError(loc, "unhandled type in fir.box code generation"); 1581 } 1582 1583 /// Basic pattern to write a field in the descriptor 1584 mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter, 1585 mlir::Location loc, mlir::Value dest, 1586 ArrayRef<unsigned> fldIndexes, mlir::Value value, 1587 bool bitcast = false) const { 1588 auto boxTy = dest.getType(); 1589 auto fldTy = this->getBoxEleTy(boxTy, fldIndexes); 1590 if (bitcast) 1591 value = rewriter.create<mlir::LLVM::BitcastOp>(loc, fldTy, value); 1592 else 1593 value = this->integerCast(loc, rewriter, fldTy, value); 1594 SmallVector<mlir::Attribute, 2> attrs; 1595 for (auto i : fldIndexes) 1596 attrs.push_back(rewriter.getI32IntegerAttr(i)); 1597 auto indexesAttr = mlir::ArrayAttr::get(rewriter.getContext(), attrs); 1598 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, boxTy, dest, value, 1599 indexesAttr); 1600 } 1601 1602 inline mlir::Value 1603 insertBaseAddress(mlir::ConversionPatternRewriter &rewriter, 1604 mlir::Location loc, mlir::Value dest, 1605 mlir::Value base) const { 1606 return insertField(rewriter, loc, dest, {kAddrPosInBox}, base, 1607 /*bitCast=*/true); 1608 } 1609 1610 inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter, 1611 mlir::Location loc, mlir::Value dest, 1612 unsigned dim, mlir::Value lb) const { 1613 return insertField(rewriter, loc, dest, 1614 {kDimsPosInBox, dim, kDimLowerBoundPos}, lb); 1615 } 1616 1617 inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter, 1618 mlir::Location loc, mlir::Value dest, 1619 unsigned dim, mlir::Value extent) const { 1620 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos}, 1621 extent); 1622 } 1623 1624 inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter, 1625 mlir::Location loc, mlir::Value dest, 1626 unsigned dim, mlir::Value stride) const { 1627 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos}, 1628 stride); 1629 } 1630 1631 /// Get the address of the type descriptor global variable that was created by 1632 /// lowering for derived type \p recType. 
1633 template <typename BOX> 1634 mlir::Value 1635 getTypeDescriptor(BOX box, mlir::ConversionPatternRewriter &rewriter, 1636 mlir::Location loc, fir::RecordType recType) const { 1637 std::string name = recType.getLoweredName(); 1638 auto module = box->template getParentOfType<mlir::ModuleOp>(); 1639 if (auto global = module.template lookupSymbol<fir::GlobalOp>(name)) { 1640 auto ty = mlir::LLVM::LLVMPointerType::get( 1641 this->lowerTy().convertType(global.getType())); 1642 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, 1643 global.getSymName()); 1644 } 1645 if (auto global = 1646 module.template lookupSymbol<mlir::LLVM::GlobalOp>(name)) { 1647 // The global may have already been translated to LLVM. 1648 auto ty = mlir::LLVM::LLVMPointerType::get(global.getType()); 1649 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, 1650 global.getSymName()); 1651 } 1652 // The global does not exist in the current translation unit, but may be 1653 // defined elsewhere (e.g., type defined in a module). 1654 // For now, create a extern_weak symbol (will become nullptr if unresolved) 1655 // to support generating code without the front-end generated symbols. 1656 // These could be made available_externally to require the symbols to be 1657 // defined elsewhere and to cause link-time failure otherwise. 1658 auto i8Ty = rewriter.getIntegerType(8); 1659 mlir::OpBuilder modBuilder(module.getBodyRegion()); 1660 // TODO: The symbol should be lowered to constant in lowering, they are read 1661 // only. 1662 modBuilder.create<mlir::LLVM::GlobalOp>(loc, i8Ty, /*isConstant=*/false, 1663 mlir::LLVM::Linkage::ExternWeak, 1664 name, mlir::Attribute{}); 1665 auto ty = mlir::LLVM::LLVMPointerType::get(i8Ty); 1666 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, name); 1667 } 1668 1669 template <typename BOX> 1670 std::tuple<fir::BoxType, mlir::Value, mlir::Value> 1671 consDescriptorPrefix(BOX box, mlir::ConversionPatternRewriter &rewriter, 1672 unsigned rank, mlir::ValueRange lenParams) const { 1673 auto loc = box.getLoc(); 1674 auto boxTy = box.getType().template dyn_cast<fir::BoxType>(); 1675 auto convTy = this->lowerTy().convertBoxType(boxTy, rank); 1676 auto llvmBoxPtrTy = convTy.template cast<mlir::LLVM::LLVMPointerType>(); 1677 auto llvmBoxTy = llvmBoxPtrTy.getElementType(); 1678 mlir::Value descriptor = 1679 rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy); 1680 1681 llvm::SmallVector<mlir::Value> typeparams = lenParams; 1682 if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) { 1683 if (!box.substr().empty() && fir::hasDynamicSize(boxTy.getEleTy())) 1684 typeparams.push_back(box.substr()[1]); 1685 } 1686 1687 // Write each of the fields with the appropriate values 1688 auto [eleSize, cfiTy] = 1689 getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams); 1690 descriptor = 1691 insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize); 1692 descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox}, 1693 this->genI32Constant(loc, rewriter, CFI_VERSION)); 1694 descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox}, 1695 this->genI32Constant(loc, rewriter, rank)); 1696 descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy); 1697 descriptor = 1698 insertField(rewriter, loc, descriptor, {kAttributePosInBox}, 1699 this->genI32Constant(loc, rewriter, getCFIAttr(boxTy))); 1700 const bool hasAddendum = isDerivedType(boxTy); 1701 descriptor = 1702 insertField(rewriter, loc, descriptor, {kF18AddendumPosInBox}, 1703 
this->genI32Constant(loc, rewriter, hasAddendum ? 1 : 0)); 1704 1705 if (hasAddendum) { 1706 auto isArray = 1707 fir::dyn_cast_ptrOrBoxEleTy(boxTy).template isa<fir::SequenceType>(); 1708 unsigned typeDescFieldId = isArray ? kOptTypePtrPosInBox : kDimsPosInBox; 1709 auto typeDesc = 1710 getTypeDescriptor(box, rewriter, loc, unwrapIfDerived(boxTy)); 1711 descriptor = 1712 insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc, 1713 /*bitCast=*/true); 1714 } 1715 1716 return {boxTy, descriptor, eleSize}; 1717 } 1718 1719 /// Compute the base address of a substring given the base address of a scalar 1720 /// string and the zero based string lower bound. 1721 mlir::Value shiftSubstringBase(mlir::ConversionPatternRewriter &rewriter, 1722 mlir::Location loc, mlir::Value base, 1723 mlir::Value lowerBound) const { 1724 llvm::SmallVector<mlir::Value> gepOperands; 1725 auto baseType = 1726 base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType(); 1727 if (baseType.isa<mlir::LLVM::LLVMArrayType>()) { 1728 auto idxTy = this->lowerTy().indexType(); 1729 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 1730 gepOperands.push_back(zero); 1731 } 1732 gepOperands.push_back(lowerBound); 1733 return this->genGEP(loc, base.getType(), rewriter, base, gepOperands); 1734 } 1735 1736 /// If the embox is not in a globalOp body, allocate storage for the box; 1737 /// store the value inside and return the generated alloca. Return the input 1738 /// value otherwise. 1739 mlir::Value 1740 placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter, 1741 mlir::Location loc, mlir::Value boxValue) const { 1742 auto *thisBlock = rewriter.getInsertionBlock(); 1743 if (thisBlock && mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp())) 1744 return boxValue; 1745 auto boxPtrTy = mlir::LLVM::LLVMPointerType::get(boxValue.getType()); 1746 auto alloca = genAllocaWithType(loc, boxPtrTy, defaultAlign, rewriter); 1747 rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, alloca); 1748 return alloca; 1749 } 1750 }; 1751 1752 /// Compute the extent of a triplet slice (lb:ub:step). 1753 static mlir::Value 1754 computeTripletExtent(mlir::ConversionPatternRewriter &rewriter, 1755 mlir::Location loc, mlir::Value lb, mlir::Value ub, 1756 mlir::Value step, mlir::Value zero, mlir::Type type) { 1757 mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb); 1758 extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step); 1759 extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step); 1760 // If the resulting extent is negative (`ub-lb` and `step` have different 1761 // signs), zero must be returned instead. 1762 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 1763 loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero); 1764 return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero); 1765 } 1766 1767 /// Create a generic box on a memory reference. This conversion lowers the 1768 /// abstract box to the appropriate, initialized descriptor.
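/// As a rough sketch only (field positions, SSA names, and LLVM types below are
/// illustrative, not the exact IR), a scalar `fir.embox %ref` becomes a chain of
/// insertions into an undef descriptor value:
/// ```
/// %d0 = llvm.mlir.undef : !llvm.struct<(...)>   // descriptor template
/// %d1 = llvm.insertvalue %elemSize, %d0[...]    // element byte size
/// %d2 = llvm.insertvalue %cfiType, %d1[...]     // CFI type code, rank, attributes, ...
/// %dn = llvm.insertvalue %baseAddr, ...[...]    // base address (bitcast to the field type)
/// ```
/// followed, outside of global initializers, by an alloca and a store of the
/// descriptor value so that the result is a memory reference.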
1769 struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> { 1770 using EmboxCommonConversion::EmboxCommonConversion; 1771 1772 mlir::LogicalResult 1773 matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor, 1774 mlir::ConversionPatternRewriter &rewriter) const override { 1775 assert(!embox.getShape() && "There should be no dims on this embox op"); 1776 auto [boxTy, dest, eleSize] = 1777 consDescriptorPrefix(embox, rewriter, /*rank=*/0, 1778 /*lenParams=*/adaptor.getOperands().drop_front(1)); 1779 dest = insertBaseAddress(rewriter, embox.getLoc(), dest, 1780 adaptor.getOperands()[0]); 1781 if (isDerivedTypeWithLenParams(boxTy)) { 1782 TODO(embox.getLoc(), 1783 "fir.embox codegen of derived with length parameters"); 1784 return failure(); 1785 } 1786 auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest); 1787 rewriter.replaceOp(embox, result); 1788 return success(); 1789 } 1790 }; 1791 1792 /// Lower `fir.emboxproc` operation. Creates a procedure box. 1793 /// TODO: Part of supporting Fortran 2003 procedure pointers. 1794 struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> { 1795 using FIROpConversion::FIROpConversion; 1796 1797 mlir::LogicalResult 1798 matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor, 1799 mlir::ConversionPatternRewriter &rewriter) const override { 1800 TODO(emboxproc.getLoc(), "fir.emboxproc codegen"); 1801 return failure(); 1802 } 1803 }; 1804 1805 /// Create a generic box on a memory reference. 1806 struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> { 1807 using EmboxCommonConversion::EmboxCommonConversion; 1808 1809 mlir::LogicalResult 1810 matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor, 1811 mlir::ConversionPatternRewriter &rewriter) const override { 1812 auto [boxTy, dest, eleSize] = consDescriptorPrefix( 1813 xbox, rewriter, xbox.getOutRank(), 1814 adaptor.getOperands().drop_front(xbox.lenParamOffset())); 1815 // Generate the triples in the dims field of the descriptor 1816 mlir::ValueRange operands = adaptor.getOperands(); 1817 auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64); 1818 mlir::Value base = operands[0]; 1819 assert(!xbox.shape().empty() && "must have a shape"); 1820 unsigned shapeOffset = xbox.shapeOffset(); 1821 bool hasShift = !xbox.shift().empty(); 1822 unsigned shiftOffset = xbox.shiftOffset(); 1823 bool hasSlice = !xbox.slice().empty(); 1824 unsigned sliceOffset = xbox.sliceOffset(); 1825 mlir::Location loc = xbox.getLoc(); 1826 mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0); 1827 mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1); 1828 mlir::Value prevDim = integerCast(loc, rewriter, i64Ty, eleSize); 1829 mlir::Value prevPtrOff = one; 1830 mlir::Type eleTy = boxTy.getEleTy(); 1831 const unsigned rank = xbox.getRank(); 1832 llvm::SmallVector<mlir::Value> gepArgs; 1833 unsigned constRows = 0; 1834 mlir::Value ptrOffset = zero; 1835 if (auto memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType())) 1836 if (auto seqTy = memEleTy.dyn_cast<fir::SequenceType>()) { 1837 mlir::Type seqEleTy = seqTy.getEleTy(); 1838 // Adjust the element scaling factor if the element is a dependent type. 
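// For example, for a character element with dynamic length the scaling factor
// becomes the LEN type parameter taken from the operand list, while a
// parameterized derived type element still falls into the TODO below.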
1839 if (fir::hasDynamicSize(seqEleTy)) { 1840 if (fir::isa_char(seqEleTy)) { 1841 assert(xbox.lenParams().size() == 1); 1842 prevPtrOff = integerCast(loc, rewriter, i64Ty, 1843 operands[xbox.lenParamOffset()]); 1844 } else if (seqEleTy.isa<fir::RecordType>()) { 1845 TODO(loc, "generate call to calculate size of PDT"); 1846 } else { 1847 return rewriter.notifyMatchFailure(xbox, "unexpected dynamic type"); 1848 } 1849 } else { 1850 constRows = seqTy.getConstantRows(); 1851 } 1852 } 1853 1854 bool hasSubcomp = !xbox.subcomponent().empty(); 1855 mlir::Value stepExpr; 1856 if (hasSubcomp) { 1857 // We have a subcomponent. The step value needs to be the number of 1858 // bytes per element (which is a derived type). 1859 mlir::Type ty0 = base.getType(); 1860 [[maybe_unused]] auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 1861 assert(ptrTy && "expected pointer type"); 1862 mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()); 1863 assert(memEleTy && "expected fir pointer type"); 1864 auto seqTy = memEleTy.dyn_cast<fir::SequenceType>(); 1865 assert(seqTy && "expected sequence type"); 1866 mlir::Type seqEleTy = seqTy.getEleTy(); 1867 auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy)); 1868 stepExpr = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter); 1869 } 1870 1871 // Process the array subspace arguments (shape, shift, etc.), if any, 1872 // translating everything to values in the descriptor wherever the entity 1873 // has a dynamic array dimension. 1874 for (unsigned di = 0, descIdx = 0; di < rank; ++di) { 1875 mlir::Value extent = operands[shapeOffset]; 1876 mlir::Value outerExtent = extent; 1877 bool skipNext = false; 1878 if (hasSlice) { 1879 mlir::Value off = operands[sliceOffset]; 1880 mlir::Value adj = one; 1881 if (hasShift) 1882 adj = operands[shiftOffset]; 1883 auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj); 1884 if (constRows > 0) { 1885 gepArgs.push_back(ao); 1886 --constRows; 1887 } else { 1888 auto dimOff = 1889 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff); 1890 ptrOffset = 1891 rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset); 1892 } 1893 if (mlir::isa_and_nonnull<fir::UndefOp>( 1894 xbox.slice()[3 * di + 1].getDefiningOp())) { 1895 // This dimension contains a scalar expression in the array slice op. 1896 // The dimension is loop invariant, will be dropped, and will not 1897 // appear in the descriptor. 1898 skipNext = true; 1899 } 1900 } 1901 if (!skipNext) { 1902 // store lower bound (normally 0) 1903 mlir::Value lb = zero; 1904 if (eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>()) { 1905 lb = one; 1906 if (hasShift) 1907 lb = operands[shiftOffset]; 1908 } 1909 dest = insertLowerBound(rewriter, loc, dest, descIdx, lb); 1910 1911 // store extent 1912 if (hasSlice) 1913 extent = computeTripletExtent(rewriter, loc, operands[sliceOffset], 1914 operands[sliceOffset + 1], 1915 operands[sliceOffset + 2], zero, i64Ty); 1916 dest = insertExtent(rewriter, loc, dest, descIdx, extent); 1917 1918 // store step (scaled by shaped extent) 1919 1920 mlir::Value step = hasSubcomp ? 
stepExpr : prevDim; 1921 if (hasSlice) 1922 step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step, 1923 operands[sliceOffset + 2]); 1924 dest = insertStride(rewriter, loc, dest, descIdx, step); 1925 ++descIdx; 1926 } 1927 1928 // compute the stride and offset for the next natural dimension 1929 prevDim = 1930 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevDim, outerExtent); 1931 if (constRows == 0) 1932 prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff, 1933 outerExtent); 1934 1935 // increment iterators 1936 ++shapeOffset; 1937 if (hasShift) 1938 ++shiftOffset; 1939 if (hasSlice) 1940 sliceOffset += 3; 1941 } 1942 if (hasSlice || hasSubcomp || !xbox.substr().empty()) { 1943 llvm::SmallVector<mlir::Value> args = {ptrOffset}; 1944 args.append(gepArgs.rbegin(), gepArgs.rend()); 1945 if (hasSubcomp) { 1946 // For each field in the path add the offset to base via the args list. 1947 // In the most general case, some offsets must be computed since 1948 // they are not known until runtime. 1949 if (fir::hasDynamicSize(fir::unwrapSequenceType( 1950 fir::unwrapPassByRefType(xbox.memref().getType())))) 1951 TODO(loc, "fir.embox codegen dynamic size component in derived type"); 1952 args.append(operands.begin() + xbox.subcomponentOffset(), 1953 operands.begin() + xbox.subcomponentOffset() + 1954 xbox.subcomponent().size()); 1955 } 1956 base = 1957 rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args); 1958 if (!xbox.substr().empty()) 1959 base = shiftSubstringBase(rewriter, loc, base, 1960 operands[xbox.substrOffset()]); 1961 } 1962 dest = insertBaseAddress(rewriter, loc, dest, base); 1963 if (isDerivedTypeWithLenParams(boxTy)) 1964 TODO(loc, "fir.embox codegen of derived with length parameters"); 1965 1966 mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest); 1967 rewriter.replaceOp(xbox, result); 1968 return success(); 1969 } 1970 }; 1971 1972 /// Create a new box given a box reference. 1973 struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> { 1974 using EmboxCommonConversion::EmboxCommonConversion; 1975 1976 mlir::LogicalResult 1977 matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor, 1978 mlir::ConversionPatternRewriter &rewriter) const override { 1979 mlir::Location loc = rebox.getLoc(); 1980 mlir::Type idxTy = lowerTy().indexType(); 1981 mlir::Value loweredBox = adaptor.getOperands()[0]; 1982 mlir::ValueRange operands = adaptor.getOperands(); 1983 1984 // Create new descriptor and fill its non-shape related data.
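// Note: for character entities the element size stored in the input descriptor
// is a byte count, so the code below divides it by the character kind (which
// here equals the number of bytes per character) to recover the LEN value used
// as a type parameter of the new descriptor.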
1985 llvm::SmallVector<mlir::Value, 2> lenParams; 1986 mlir::Type inputEleTy = getInputEleTy(rebox); 1987 if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) { 1988 mlir::Value len = 1989 loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter); 1990 if (charTy.getFKind() != 1) { 1991 mlir::Value width = 1992 genConstantIndex(loc, idxTy, rewriter, charTy.getFKind()); 1993 len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width); 1994 } 1995 lenParams.emplace_back(len); 1996 } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) { 1997 if (recTy.getNumLenParams() != 0) 1998 TODO(loc, "reboxing descriptor of derived type with length parameters"); 1999 } 2000 auto [boxTy, dest, eleSize] = 2001 consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams); 2002 2003 // Read input extents, strides, and base address 2004 llvm::SmallVector<mlir::Value> inputExtents; 2005 llvm::SmallVector<mlir::Value> inputStrides; 2006 const unsigned inputRank = rebox.getRank(); 2007 for (unsigned i = 0; i < inputRank; ++i) { 2008 mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i); 2009 SmallVector<mlir::Value, 3> dimInfo = 2010 getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter); 2011 inputExtents.emplace_back(dimInfo[1]); 2012 inputStrides.emplace_back(dimInfo[2]); 2013 } 2014 2015 mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType()); 2016 mlir::Value baseAddr = 2017 loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter); 2018 2019 if (!rebox.slice().empty() || !rebox.subcomponent().empty()) 2020 return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides, 2021 operands, rewriter); 2022 return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides, 2023 operands, rewriter); 2024 } 2025 2026 private: 2027 /// Write resulting shape and base address in descriptor, and replace rebox 2028 /// op. 2029 mlir::LogicalResult 2030 finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 2031 mlir::ValueRange lbounds, mlir::ValueRange extents, 2032 mlir::ValueRange strides, 2033 mlir::ConversionPatternRewriter &rewriter) const { 2034 mlir::Location loc = rebox.getLoc(); 2035 mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1); 2036 for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) { 2037 unsigned dim = iter.index(); 2038 mlir::Value lb = lbounds.empty() ? one : lbounds[dim]; 2039 dest = insertLowerBound(rewriter, loc, dest, dim, lb); 2040 dest = insertExtent(rewriter, loc, dest, dim, std::get<0>(iter.value())); 2041 dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value())); 2042 } 2043 dest = insertBaseAddress(rewriter, loc, dest, base); 2044 mlir::Value result = 2045 placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest); 2046 rewriter.replaceOp(rebox, result); 2047 return success(); 2048 } 2049 2050 // Apply slice given the base address, extents and strides of the input box. 2051 mlir::LogicalResult 2052 sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 2053 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 2054 mlir::ValueRange operands, 2055 mlir::ConversionPatternRewriter &rewriter) const { 2056 mlir::Location loc = rebox.getLoc(); 2057 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 2058 mlir::Type idxTy = lowerTy().indexType(); 2059 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2060 // Apply subcomponent and substring shift on base address. 
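// For instance, when reboxing something like `a(:)%comp`, the base address is
// first advanced to the `comp` component with a GEP (and, for characters, by
// the substring offset); the byte strides applied below still come from the
// parent array's descriptor.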
2061 if (!rebox.subcomponent().empty() || !rebox.substr().empty()) { 2062 // Cast to inputEleTy* so that a GEP can be used. 2063 mlir::Type inputEleTy = getInputEleTy(rebox); 2064 auto llvmElePtrTy = 2065 mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy)); 2066 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base); 2067 2068 if (!rebox.subcomponent().empty()) { 2069 llvm::SmallVector<mlir::Value> gepOperands = {zero}; 2070 for (unsigned i = 0; i < rebox.subcomponent().size(); ++i) 2071 gepOperands.push_back(operands[rebox.subcomponentOffset() + i]); 2072 base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands); 2073 } 2074 if (!rebox.substr().empty()) 2075 base = shiftSubstringBase(rewriter, loc, base, 2076 operands[rebox.substrOffset()]); 2077 } 2078 2079 if (rebox.slice().empty()) 2080 // The array section is of the form array[%component][substring], keep 2081 // the input array extents and strides. 2082 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 2083 inputExtents, inputStrides, rewriter); 2084 2085 // Strides from the fir.box are in bytes. 2086 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2087 2088 // The slice is of the form array(i:j:k)[%component]. Compute new extents 2089 // and strides. 2090 llvm::SmallVector<mlir::Value> slicedExtents; 2091 llvm::SmallVector<mlir::Value> slicedStrides; 2092 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2093 const bool sliceHasOrigins = !rebox.shift().empty(); 2094 unsigned sliceOps = rebox.sliceOffset(); 2095 unsigned shiftOps = rebox.shiftOffset(); 2096 auto strideOps = inputStrides.begin(); 2097 const unsigned inputRank = inputStrides.size(); 2098 for (unsigned i = 0; i < inputRank; 2099 ++i, ++strideOps, ++shiftOps, sliceOps += 3) { 2100 mlir::Value sliceLb = 2101 integerCast(loc, rewriter, idxTy, operands[sliceOps]); 2102 mlir::Value inputStride = *strideOps; // already idxTy 2103 // Apply origin shift: base += (lb-shift)*input_stride 2104 mlir::Value sliceOrigin = 2105 sliceHasOrigins 2106 ? integerCast(loc, rewriter, idxTy, operands[shiftOps]) 2107 : one; 2108 mlir::Value diff = 2109 rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin); 2110 mlir::Value offset = 2111 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride); 2112 base = genGEP(loc, voidPtrTy, rewriter, base, offset); 2113 // Apply upper bound and step if this is a triplet. Otherwise, the 2114 // dimension is dropped and no extents/strides are computed. 2115 mlir::Value upper = operands[sliceOps + 1]; 2116 const bool isTripletSlice = 2117 !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp()); 2118 if (isTripletSlice) { 2119 mlir::Value step = 2120 integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]); 2121 // extent = ub-lb+step/step 2122 mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper); 2123 mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb, 2124 sliceUb, step, zero, idxTy); 2125 slicedExtents.emplace_back(extent); 2126 // stride = step*input_stride 2127 mlir::Value stride = 2128 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride); 2129 slicedStrides.emplace_back(stride); 2130 } 2131 } 2132 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 2133 slicedExtents, slicedStrides, rewriter); 2134 } 2135 2136 /// Apply a new shape to the data described by a box given the base address, 2137 /// extents and strides of the box. 
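/// Only the leading input stride is reused; each following stride is computed
/// as stride(i+1) = extent(i) * stride(i) (in bytes), i.e. the data being
/// reshaped is assumed contiguous past the first dimension.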
2138 mlir::LogicalResult 2139 reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 2140 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 2141 mlir::ValueRange operands, 2142 mlir::ConversionPatternRewriter &rewriter) const { 2143 mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(), 2144 operands.begin() + rebox.shiftOffset() + 2145 rebox.shift().size()}; 2146 if (rebox.shape().empty()) { 2147 // Only setting new lower bounds. 2148 return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents, 2149 inputStrides, rewriter); 2150 } 2151 2152 mlir::Location loc = rebox.getLoc(); 2153 // Strides from the fir.box are in bytes. 2154 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 2155 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2156 2157 llvm::SmallVector<mlir::Value> newStrides; 2158 llvm::SmallVector<mlir::Value> newExtents; 2159 mlir::Type idxTy = lowerTy().indexType(); 2160 // First stride from input box is kept. The rest is assumed contiguous 2161 // (it is not possible to reshape otherwise). If the input is scalar, 2162 // which may be OK if all new extents are ones, the stride does not 2163 // matter, use one. 2164 mlir::Value stride = inputStrides.empty() 2165 ? genConstantIndex(loc, idxTy, rewriter, 1) 2166 : inputStrides[0]; 2167 for (unsigned i = 0; i < rebox.shape().size(); ++i) { 2168 mlir::Value rawExtent = operands[rebox.shapeOffset() + i]; 2169 mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent); 2170 newExtents.emplace_back(extent); 2171 newStrides.emplace_back(stride); 2172 // nextStride = extent * stride; 2173 stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride); 2174 } 2175 return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides, 2176 rewriter); 2177 } 2178 2179 /// Return scalar element type of the input box. 2180 static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) { 2181 auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType()); 2182 if (auto seqTy = ty.dyn_cast<fir::SequenceType>()) 2183 return seqTy.getEleTy(); 2184 return ty; 2185 } 2186 }; 2187 2188 // Code shared between insert_value and extract_value Ops. 2189 struct ValueOpCommon { 2190 // Translate the arguments pertaining to any multidimensional array to 2191 // row-major order for LLVM-IR. 
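// Illustrative example (shapes assumed for the sake of this note): for a value
// of type !llvm.array<4 x array<5 x i32>>, a FIR coordinate pair (i, j) given
// in column-major order is reversed to (j, i) before being used as an
// extractvalue/insertvalue position.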
2192 static void toRowMajor(SmallVectorImpl<mlir::Attribute> &attrs, 2193 mlir::Type ty) { 2194 assert(ty && "type is null"); 2195 const auto end = attrs.size(); 2196 for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) { 2197 if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 2198 const auto dim = getDimension(seq); 2199 if (dim > 1) { 2200 auto ub = std::min(i + dim, end); 2201 std::reverse(attrs.begin() + i, attrs.begin() + ub); 2202 i += dim - 1; 2203 } 2204 ty = getArrayElementType(seq); 2205 } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) { 2206 ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()]; 2207 } else { 2208 llvm_unreachable("index into invalid type"); 2209 } 2210 } 2211 } 2212 2213 static llvm::SmallVector<mlir::Attribute> 2214 collectIndices(mlir::ConversionPatternRewriter &rewriter, 2215 mlir::ArrayAttr arrAttr) { 2216 llvm::SmallVector<mlir::Attribute> attrs; 2217 for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) { 2218 if (i->isa<mlir::IntegerAttr>()) { 2219 attrs.push_back(*i); 2220 } else { 2221 auto fieldName = i->cast<mlir::StringAttr>().getValue(); 2222 ++i; 2223 auto ty = i->cast<mlir::TypeAttr>().getValue(); 2224 auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName); 2225 attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index)); 2226 } 2227 } 2228 return attrs; 2229 } 2230 2231 private: 2232 static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) { 2233 unsigned result = 1; 2234 for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>(); 2235 eleTy; 2236 eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>()) 2237 ++result; 2238 return result; 2239 } 2240 2241 static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) { 2242 auto eleTy = ty.getElementType(); 2243 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2244 eleTy = arrTy.getElementType(); 2245 return eleTy; 2246 } 2247 }; 2248 2249 namespace { 2250 /// Extract a subobject value from an ssa-value of aggregate type 2251 struct ExtractValueOpConversion 2252 : public FIROpAndTypeConversion<fir::ExtractValueOp>, 2253 public ValueOpCommon { 2254 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2255 2256 mlir::LogicalResult 2257 doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor, 2258 mlir::ConversionPatternRewriter &rewriter) const override { 2259 auto attrs = collectIndices(rewriter, extractVal.coor()); 2260 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 2261 auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs); 2262 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>( 2263 extractVal, ty, adaptor.getOperands()[0], position); 2264 return success(); 2265 } 2266 }; 2267 2268 /// InsertValue is the generalized instruction for the composition of new 2269 /// aggregate type values. 
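/// Schematically, a `fir.insert_value` on an aggregate becomes a single
/// `llvm.insertvalue` with the same inserted value and with its constant
/// coordinates first translated to row-major order by toRowMajor().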
2270 struct InsertValueOpConversion 2271 : public FIROpAndTypeConversion<fir::InsertValueOp>, 2272 public ValueOpCommon { 2273 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2274 2275 mlir::LogicalResult 2276 doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor, 2277 mlir::ConversionPatternRewriter &rewriter) const override { 2278 auto attrs = collectIndices(rewriter, insertVal.coor()); 2279 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 2280 auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs); 2281 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 2282 insertVal, ty, adaptor.getOperands()[0], adaptor.getOperands()[1], 2283 position); 2284 return success(); 2285 } 2286 }; 2287 2288 /// InsertOnRange inserts a value into a sequence over a range of offsets. 2289 struct InsertOnRangeOpConversion 2290 : public FIROpAndTypeConversion<fir::InsertOnRangeOp> { 2291 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2292 2293 // Increments an array of subscripts in a row major fasion. 2294 void incrementSubscripts(const SmallVector<uint64_t> &dims, 2295 SmallVector<uint64_t> &subscripts) const { 2296 for (size_t i = dims.size(); i > 0; --i) { 2297 if (++subscripts[i - 1] < dims[i - 1]) { 2298 return; 2299 } 2300 subscripts[i - 1] = 0; 2301 } 2302 } 2303 2304 mlir::LogicalResult 2305 doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor, 2306 mlir::ConversionPatternRewriter &rewriter) const override { 2307 2308 llvm::SmallVector<uint64_t> dims; 2309 auto type = adaptor.getOperands()[0].getType(); 2310 2311 // Iteratively extract the array dimensions from the type. 2312 while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 2313 dims.push_back(t.getNumElements()); 2314 type = t.getElementType(); 2315 } 2316 2317 SmallVector<uint64_t> lBounds; 2318 SmallVector<uint64_t> uBounds; 2319 2320 // Unzip the upper and lower bound and convert to a row major format. 2321 mlir::DenseIntElementsAttr coor = range.coor(); 2322 auto reversedCoor = llvm::reverse(coor.getValues<int64_t>()); 2323 for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) { 2324 uBounds.push_back(*i++); 2325 lBounds.push_back(*i); 2326 } 2327 2328 auto &subscripts = lBounds; 2329 auto loc = range.getLoc(); 2330 mlir::Value lastOp = adaptor.getOperands()[0]; 2331 mlir::Value insertVal = adaptor.getOperands()[1]; 2332 2333 auto i64Ty = rewriter.getI64Type(); 2334 while (subscripts != uBounds) { 2335 // Convert uint64_t's to Attribute's. 2336 SmallVector<mlir::Attribute> subscriptAttrs; 2337 for (const auto &subscript : subscripts) 2338 subscriptAttrs.push_back(IntegerAttr::get(i64Ty, subscript)); 2339 lastOp = rewriter.create<mlir::LLVM::InsertValueOp>( 2340 loc, ty, lastOp, insertVal, 2341 ArrayAttr::get(range.getContext(), subscriptAttrs)); 2342 2343 incrementSubscripts(dims, subscripts); 2344 } 2345 2346 // Convert uint64_t's to Attribute's. 2347 SmallVector<mlir::Attribute> subscriptAttrs; 2348 for (const auto &subscript : subscripts) 2349 subscriptAttrs.push_back( 2350 IntegerAttr::get(rewriter.getI64Type(), subscript)); 2351 mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs); 2352 2353 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 2354 range, ty, lastOp, insertVal, 2355 ArrayAttr::get(range.getContext(), arrayRef)); 2356 2357 return success(); 2358 } 2359 }; 2360 } // namespace 2361 2362 /// XArrayCoor is the address arithmetic on a dynamically shaped, sliced, 2363 /// shifted etc. array. 
2364 /// (See the static restriction on coordinate_of.) array_coor determines the 2365 /// coordinate (location) of a specific element. 2366 struct XArrayCoorOpConversion 2367 : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> { 2368 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2369 2370 mlir::LogicalResult 2371 doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor, 2372 mlir::ConversionPatternRewriter &rewriter) const override { 2373 auto loc = coor.getLoc(); 2374 mlir::ValueRange operands = adaptor.getOperands(); 2375 unsigned rank = coor.getRank(); 2376 assert(coor.indices().size() == rank); 2377 assert(coor.shape().empty() || coor.shape().size() == rank); 2378 assert(coor.shift().empty() || coor.shift().size() == rank); 2379 assert(coor.slice().empty() || coor.slice().size() == 3 * rank); 2380 mlir::Type idxTy = lowerTy().indexType(); 2381 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2382 mlir::Value prevExt = one; 2383 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2384 mlir::Value offset = zero; 2385 const bool isShifted = !coor.shift().empty(); 2386 const bool isSliced = !coor.slice().empty(); 2387 const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>(); 2388 2389 auto indexOps = coor.indices().begin(); 2390 auto shapeOps = coor.shape().begin(); 2391 auto shiftOps = coor.shift().begin(); 2392 auto sliceOps = coor.slice().begin(); 2393 // For each dimension of the array, generate the offset calculation. 2394 for (unsigned i = 0; i < rank; 2395 ++i, ++indexOps, ++shapeOps, ++shiftOps, sliceOps += 3) { 2396 mlir::Value index = 2397 integerCast(loc, rewriter, idxTy, operands[coor.indicesOffset() + i]); 2398 mlir::Value lb = isShifted ? integerCast(loc, rewriter, idxTy, 2399 operands[coor.shiftOffset() + i]) 2400 : one; 2401 mlir::Value step = one; 2402 bool normalSlice = isSliced; 2403 // Compute zero based index in dimension i of the element, applying 2404 // potential triplets and lower bounds. 2405 if (isSliced) { 2406 mlir::Value ub = *(sliceOps + 1); 2407 normalSlice = !mlir::isa_and_nonnull<fir::UndefOp>(ub.getDefiningOp()); 2408 if (normalSlice) 2409 step = integerCast(loc, rewriter, idxTy, *(sliceOps + 2)); 2410 } 2411 auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb); 2412 mlir::Value diff = 2413 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step); 2414 if (normalSlice) { 2415 mlir::Value sliceLb = 2416 integerCast(loc, rewriter, idxTy, operands[coor.sliceOffset() + i]); 2417 auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb); 2418 diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj); 2419 } 2420 // Update the offset given the stride and the zero based index `diff` 2421 // that was just computed. 2422 if (baseIsBoxed) { 2423 // Use stride in bytes from the descriptor. 2424 mlir::Value stride = 2425 loadStrideFromBox(loc, adaptor.getOperands()[0], i, rewriter); 2426 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride); 2427 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2428 } else { 2429 // Use stride computed at last iteration. 2430 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt); 2431 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2432 // Compute next stride assuming contiguity of the base array 2433 // (in element number). 
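// In other words prevExt(i+1) = prevExt(i) * extent(i); starting from 1, it is
// the number of elements spanned by one step in the next dimension.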
2434 auto nextExt = 2435 integerCast(loc, rewriter, idxTy, operands[coor.shapeOffset() + i]); 2436 prevExt = 2437 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt); 2438 } 2439 } 2440 2441 // Add computed offset to the base address. 2442 if (baseIsBoxed) { 2443 // Working with byte offsets. The base address is read from the fir.box. 2444 // and need to be casted to i8* to do the pointer arithmetic. 2445 mlir::Type baseTy = 2446 getBaseAddrTypeFromBox(adaptor.getOperands()[0].getType()); 2447 mlir::Value base = 2448 loadBaseAddrFromBox(loc, baseTy, adaptor.getOperands()[0], rewriter); 2449 mlir::Type voidPtrTy = getVoidPtrType(); 2450 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2451 llvm::SmallVector<mlir::Value> args{offset}; 2452 auto addr = 2453 rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args); 2454 if (coor.subcomponent().empty()) { 2455 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, baseTy, addr); 2456 return success(); 2457 } 2458 auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr); 2459 args.clear(); 2460 args.push_back(zero); 2461 if (!coor.lenParams().empty()) { 2462 // If type parameters are present, then we don't want to use a GEPOp 2463 // as below, as the LLVM struct type cannot be statically defined. 2464 TODO(loc, "derived type with type parameters"); 2465 } 2466 // TODO: array offset subcomponents must be converted to LLVM's 2467 // row-major layout here. 2468 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2469 args.push_back(operands[i]); 2470 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, baseTy, casted, 2471 args); 2472 return success(); 2473 } 2474 2475 // The array was not boxed, so it must be contiguous. offset is therefore an 2476 // element offset and the base type is kept in the GEP unless the element 2477 // type size is itself dynamic. 2478 mlir::Value base; 2479 if (coor.subcomponent().empty()) { 2480 // No subcomponent. 2481 if (!coor.lenParams().empty()) { 2482 // Type parameters. Adjust element size explicitly. 2483 auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType()); 2484 assert(eleTy && "result must be a reference-like type"); 2485 if (fir::characterWithDynamicLen(eleTy)) { 2486 assert(coor.lenParams().size() == 1); 2487 auto bitsInChar = lowerTy().getKindMap().getCharacterBitsize( 2488 eleTy.cast<fir::CharacterType>().getFKind()); 2489 auto scaling = genConstantIndex(loc, idxTy, rewriter, bitsInChar / 8); 2490 auto scaledBySize = 2491 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, scaling); 2492 auto length = 2493 integerCast(loc, rewriter, idxTy, 2494 adaptor.getOperands()[coor.lenParamsOffset()]); 2495 offset = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, scaledBySize, 2496 length); 2497 } else { 2498 TODO(loc, "compute size of derived type with type parameters"); 2499 } 2500 } 2501 // Cast the base address to a pointer to T. 2502 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty, 2503 adaptor.getOperands()[0]); 2504 } else { 2505 // Operand #0 must have a pointer type. For subcomponent slicing, we 2506 // want to cast away the array type and have a plain struct type. 
2507 mlir::Type ty0 = adaptor.getOperands()[0].getType(); 2508 auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 2509 assert(ptrTy && "expected pointer type"); 2510 mlir::Type eleTy = ptrTy.getElementType(); 2511 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2512 eleTy = arrTy.getElementType(); 2513 auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy); 2514 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy, 2515 adaptor.getOperands()[0]); 2516 } 2517 SmallVector<mlir::Value> args = {offset}; 2518 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2519 args.push_back(operands[i]); 2520 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args); 2521 return success(); 2522 } 2523 }; 2524 2525 // 2526 // Primitive operations on Complex types 2527 // 2528 2529 /// Generate inline code for complex addition/subtraction 2530 template <typename LLVMOP, typename OPTY> 2531 static mlir::LLVM::InsertValueOp 2532 complexSum(OPTY sumop, mlir::ValueRange opnds, 2533 mlir::ConversionPatternRewriter &rewriter, 2534 fir::LLVMTypeConverter &lowering) { 2535 mlir::Value a = opnds[0]; 2536 mlir::Value b = opnds[1]; 2537 auto loc = sumop.getLoc(); 2538 auto ctx = sumop.getContext(); 2539 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 2540 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 2541 mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType())); 2542 mlir::Type ty = lowering.convertType(sumop.getType()); 2543 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 2544 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 2545 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 2546 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 2547 auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1); 2548 auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1); 2549 auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 2550 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0); 2551 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1); 2552 } 2553 2554 namespace { 2555 struct AddcOpConversion : public FIROpConversion<fir::AddcOp> { 2556 using FIROpConversion::FIROpConversion; 2557 2558 mlir::LogicalResult 2559 matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor, 2560 mlir::ConversionPatternRewriter &rewriter) const override { 2561 // given: (x + iy) + (x' + iy') 2562 // result: (x + x') + i(y + y') 2563 auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(), 2564 rewriter, lowerTy()); 2565 rewriter.replaceOp(addc, r.getResult()); 2566 return success(); 2567 } 2568 }; 2569 2570 struct SubcOpConversion : public FIROpConversion<fir::SubcOp> { 2571 using FIROpConversion::FIROpConversion; 2572 2573 mlir::LogicalResult 2574 matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor, 2575 mlir::ConversionPatternRewriter &rewriter) const override { 2576 // given: (x + iy) - (x' + iy') 2577 // result: (x - x') + i(y - y') 2578 auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(), 2579 rewriter, lowerTy()); 2580 rewriter.replaceOp(subc, r.getResult()); 2581 return success(); 2582 } 2583 }; 2584 2585 /// Inlined complex multiply 2586 struct MulcOpConversion : public FIROpConversion<fir::MulcOp> { 2587 using FIROpConversion::FIROpConversion; 2588 2589 mlir::LogicalResult 2590 matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor, 2591 mlir::ConversionPatternRewriter 
&rewriter) const override { 2592 // TODO: Can we use a call to __muldc3 ? 2593 // given: (x + iy) * (x' + iy') 2594 // result: (xx'-yy')+i(xy'+yx') 2595 mlir::Value a = adaptor.getOperands()[0]; 2596 mlir::Value b = adaptor.getOperands()[1]; 2597 auto loc = mulc.getLoc(); 2598 auto *ctx = mulc.getContext(); 2599 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 2600 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 2601 mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType())); 2602 mlir::Type ty = convertType(mulc.getType()); 2603 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 2604 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 2605 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 2606 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 2607 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 2608 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 2609 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 2610 auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx); 2611 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 2612 auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy); 2613 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 2614 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 2615 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 2616 rewriter.replaceOp(mulc, r0.getResult()); 2617 return success(); 2618 } 2619 }; 2620 2621 /// Inlined complex division 2622 struct DivcOpConversion : public FIROpConversion<fir::DivcOp> { 2623 using FIROpConversion::FIROpConversion; 2624 2625 mlir::LogicalResult 2626 matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor, 2627 mlir::ConversionPatternRewriter &rewriter) const override { 2628 // TODO: Can we use a call to __divdc3 instead? 2629 // Just generate inline code for now. 
2630 // given: (x + iy) / (x' + iy') 2631 // result: ((xx'+yy')/d) + i((yx'-xy')/d) where d = x'x' + y'y' 2632 mlir::Value a = adaptor.getOperands()[0]; 2633 mlir::Value b = adaptor.getOperands()[1]; 2634 auto loc = divc.getLoc(); 2635 auto *ctx = divc.getContext(); 2636 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 2637 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 2638 mlir::Type eleTy = convertType(getComplexEleTy(divc.getType())); 2639 mlir::Type ty = convertType(divc.getType()); 2640 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 2641 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 2642 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 2643 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 2644 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 2645 auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1); 2646 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 2647 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 2648 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 2649 auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1); 2650 auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1); 2651 auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy); 2652 auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy); 2653 auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d); 2654 auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d); 2655 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 2656 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 2657 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 2658 rewriter.replaceOp(divc, r0.getResult()); 2659 return success(); 2660 } 2661 }; 2662 2663 /// Inlined complex negation 2664 struct NegcOpConversion : public FIROpConversion<fir::NegcOp> { 2665 using FIROpConversion::FIROpConversion; 2666 2667 mlir::LogicalResult 2668 matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor, 2669 mlir::ConversionPatternRewriter &rewriter) const override { 2670 // given: -(x + iy) 2671 // result: -x - iy 2672 auto *ctxt = neg.getContext(); 2673 auto eleTy = convertType(getComplexEleTy(neg.getType())); 2674 auto ty = convertType(neg.getType()); 2675 auto loc = neg.getLoc(); 2676 mlir::Value o0 = adaptor.getOperands()[0]; 2677 auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0)); 2678 auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1)); 2679 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0); 2680 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1); 2681 auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp); 2682 auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip); 2683 auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0); 2684 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1); 2685 return success(); 2686 } 2687 }; 2688 2689 /// Conversion pattern for operations that must be dead. The information in these 2690 /// operations is used by other operations; at this point they should not have 2691 /// any remaining uses. 2692 /// These operations are normally dead after the pre-codegen pass.
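/// For example, a `fir.shape` whose only user was a `fir.embox` has already
/// been folded into the operands of the corresponding XEmboxOp by that pass,
/// so the `fir.shape` can simply be erased here; any remaining use is an error.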
2693 template <typename FromOp> 2694 struct MustBeDeadConversion : public FIROpConversion<FromOp> { 2695 explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering) 2696 : FIROpConversion<FromOp>(lowering) {} 2697 using OpAdaptor = typename FromOp::Adaptor; 2698 2699 mlir::LogicalResult 2700 matchAndRewrite(FromOp op, OpAdaptor adaptor, 2701 mlir::ConversionPatternRewriter &rewriter) const final { 2702 if (!op->getUses().empty()) 2703 return rewriter.notifyMatchFailure(op, "op must be dead"); 2704 rewriter.eraseOp(op); 2705 return success(); 2706 } 2707 }; 2708 2709 struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> { 2710 using MustBeDeadConversion::MustBeDeadConversion; 2711 }; 2712 2713 struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> { 2714 using MustBeDeadConversion::MustBeDeadConversion; 2715 }; 2716 2717 struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> { 2718 using MustBeDeadConversion::MustBeDeadConversion; 2719 }; 2720 2721 struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> { 2722 using MustBeDeadConversion::MustBeDeadConversion; 2723 }; 2724 2725 /// `fir.is_present %ref` --> 2726 /// ``` 2727 /// %0 = llvm.mlir.constant(0 : i64) 2728 /// %1 = llvm.ptrtoint %ref 2729 /// %2 = llvm.icmp "ne" %1, %0 : i64 2730 /// ``` 2731 struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> { 2732 using FIROpConversion::FIROpConversion; 2733 2734 mlir::LogicalResult 2735 matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor, 2736 mlir::ConversionPatternRewriter &rewriter) const override { 2737 mlir::Type idxTy = lowerTy().indexType(); 2738 mlir::Location loc = isPresent.getLoc(); 2739 auto ptr = adaptor.getOperands()[0]; 2740 2741 if (isPresent.val().getType().isa<fir::BoxCharType>()) { 2742 auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>(); 2743 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 2744 2745 mlir::Type ty = structTy.getBody()[0]; 2746 mlir::MLIRContext *ctx = isPresent.getContext(); 2747 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 2748 ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0); 2749 } 2750 mlir::LLVM::ConstantOp c0 = 2751 genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0); 2752 auto addr = rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr); 2753 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 2754 isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0); 2755 2756 return success(); 2757 } 2758 }; 2759 2760 /// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of 2761 /// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element 2762 /// in this struct is a pointer. Its type is determined from `KIND`. The 2nd 2763 /// element is the length of the character buffer (`#n`).
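/// Roughly (SSA names illustrative, shown for KIND=1):
/// ```
/// %0 = llvm.mlir.undef : !llvm.struct<(ptr<i8>, i64)>
/// %1 = llvm.insertvalue %buffer, %0[0]
/// %2 = llvm.insertvalue %len, %1[1]
/// ```
/// where %len is first cast to the struct's length integer type if needed.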
struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    MLIRContext *ctx = emboxChar.getContext();

    mlir::Value charBuffer = operands[0];
    mlir::Value charBufferLen = operands[1];

    mlir::Location loc = emboxChar.getLoc();
    mlir::Type llvmStructTy = convertType(emboxChar.getType());
    auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy);

    mlir::Type lenTy =
        llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1];
    mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen);

    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>(
        loc, llvmStructTy, llvmStruct, charBuffer, c0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
        emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1);

    return success();
  }
};
} // namespace

/// Construct an `llvm.extractvalue` instruction. It will return the value at
/// element \p x from \p tuple.
static mlir::LLVM::ExtractValueOp
genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty,
                         mlir::ConversionPatternRewriter &rewriter,
                         mlir::MLIRContext *ctx, int x) {
  auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x));
  auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x];
  return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx);
}

namespace {
/// Convert `fir.boxchar_len` to `llvm.extractvalue` for the 2nd part of the
/// boxchar.
struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value boxChar = adaptor.getOperands()[0];
    mlir::Location loc = boxChar.getLoc();
    mlir::MLIRContext *ctx = boxChar.getContext();
    mlir::Type returnValTy = boxCharLen.getResult().getType();

    constexpr int boxcharLenIdx = 1;
    mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex(
        loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx);
    mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len);
    rewriter.replaceOp(boxCharLen, lenAfterCast);

    return success();
  }
};

/// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for
/// the character buffer and one for the buffer length.
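/// Schematically (value names illustrative):
///
///   %buffer = llvm.extractvalue %boxchar[0] : !llvm.struct<(ptr<i8>, i64)>
///   %len    = llvm.extractvalue %boxchar[1] : !llvm.struct<(ptr<i8>, i64)>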
struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    MLIRContext *ctx = unboxchar.getContext();

    mlir::Type lenTy = convertType(unboxchar.getType(1));
    mlir::Value tuple = adaptor.getOperands()[0];
    mlir::Type tupleTy = tuple.getType();

    mlir::Location loc = unboxchar.getLoc();
    mlir::Value ptrToBuffer =
        genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 0);

    mlir::LLVM::ExtractValueOp len =
        genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 1);
    mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len);

    rewriter.replaceOp(unboxchar,
                       ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast});
    return success();
  }
};

/// Lower the `fir.unboxproc` operation. Unbox a procedure box value, yielding
/// its components.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(unboxproc.getLoc(), "fir.unboxproc codegen");
    return failure();
  }
};

/// Convert `fir.field_index`. The conversion depends on whether the size of
/// the record is static or dynamic.
struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> {
  using FIROpConversion::FIROpConversion;

  // NB: most field references should be resolved by this point
  mlir::LogicalResult
  matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto recTy = field.on_type().cast<fir::RecordType>();
    unsigned index = recTy.getFieldIndex(field.field_id());

    if (!fir::hasDynamicSize(recTy)) {
      // Derived type has compile-time constant layout. Return index of the
      // component type in the parent type (to be used in GEP).
      rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset(
                                    field.getLoc(), rewriter, index)});
      return success();
    }

    // Derived type has a dynamic layout. Call the compiler-generated function
    // to determine the byte offset of the field at runtime. This returns a
    // non-constant.
    FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get(
        field.getContext(), getOffsetMethodName(recTy, field.field_id()));
    NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr);
    NamedAttribute fieldAttr = rewriter.getNamedAttr(
        "field", mlir::IntegerAttr::get(lowerTy().indexType(), index));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        field, lowerTy().offsetType(), adaptor.getOperands(),
        llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr});
    return success();
  }

  // Reconstruct the name of the compiler-generated method that calculates the
  // offset.
  inline static std::string getOffsetMethodName(fir::RecordType recTy,
                                                llvm::StringRef field) {
    return recTy.getName().str() + "P." + field.str() + ".offset";
  }
};

/// Convert a (memory) reference into a reference to a subobject.
2916 /// The coordinate_of op is a Swiss army knife operation that can be used on 2917 /// (memory) references to records, arrays, complex, etc. as well as boxes. 2918 /// With unboxed arrays, there is the restriction that the array have a static 2919 /// shape in all but the last column. 2920 struct CoordinateOpConversion 2921 : public FIROpAndTypeConversion<fir::CoordinateOp> { 2922 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2923 2924 mlir::LogicalResult 2925 doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor, 2926 mlir::ConversionPatternRewriter &rewriter) const override { 2927 mlir::ValueRange operands = adaptor.getOperands(); 2928 2929 mlir::Location loc = coor.getLoc(); 2930 mlir::Value base = operands[0]; 2931 mlir::Type baseObjectTy = coor.getBaseType(); 2932 mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2933 assert(objectTy && "fir.coordinate_of expects a reference type"); 2934 2935 // Complex type - basically, extract the real or imaginary part 2936 if (fir::isa_complex(objectTy)) { 2937 mlir::LLVM::ConstantOp c0 = 2938 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2939 SmallVector<mlir::Value> offs = {c0, operands[1]}; 2940 mlir::Value gep = genGEP(loc, ty, rewriter, base, offs); 2941 rewriter.replaceOp(coor, gep); 2942 return success(); 2943 } 2944 2945 // Boxed type - get the base pointer from the box 2946 if (baseObjectTy.dyn_cast<fir::BoxType>()) 2947 return doRewriteBox(coor, ty, operands, loc, rewriter); 2948 2949 // Reference or pointer type 2950 if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType>()) 2951 return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter); 2952 2953 return rewriter.notifyMatchFailure( 2954 coor, "fir.coordinate_of base operand has unsupported type"); 2955 } 2956 2957 unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) const { 2958 return fir::hasDynamicSize(ty) 2959 ? op.getDefiningOp() 2960 ->getAttrOfType<mlir::IntegerAttr>("field") 2961 .getInt() 2962 : getIntValue(op); 2963 } 2964 2965 int64_t getIntValue(mlir::Value val) const { 2966 assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value"); 2967 mlir::Operation *defop = val.getDefiningOp(); 2968 2969 if (auto constOp = dyn_cast<mlir::arith::ConstantIntOp>(defop)) 2970 return constOp.value(); 2971 if (auto llConstOp = dyn_cast<mlir::LLVM::ConstantOp>(defop)) 2972 if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>()) 2973 return attr.getValue().getSExtValue(); 2974 fir::emitFatalError(val.getLoc(), "must be a constant"); 2975 } 2976 2977 bool hasSubDimensions(mlir::Type type) const { 2978 return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>(); 2979 } 2980 2981 /// Check whether this form of `!fir.coordinate_of` is supported. These 2982 /// additional checks are required, because we are not yet able to convert 2983 /// all valid forms of `!fir.coordinate_of`. 2984 /// TODO: Either implement the unsupported cases or extend the verifier 2985 /// in FIROps.cpp instead. 
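  /// As an informal summary of the checks below (illustrative, not a complete
  /// specification): coordinates that drill through records, tuples, and
  /// statically shaped arrays are accepted, while a coordinate applied to a
  /// plain (non-aggregate) element is only accepted when it is the sole
  /// coordinate.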
  bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) const {
    const std::size_t numOfCoors = coors.size();
    std::size_t i = 0;
    bool subEle = false;
    bool ptrEle = false;
    for (; i < numOfCoors; ++i) {
      mlir::Value nxtOpnd = coors[i];
      if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
        subEle = true;
        i += arrTy.getDimension() - 1;
        type = arrTy.getEleTy();
      } else if (auto recTy = type.dyn_cast<fir::RecordType>()) {
        subEle = true;
        type = recTy.getType(getFieldNumber(recTy, nxtOpnd));
      } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) {
        subEle = true;
        type = tupTy.getType(getIntValue(nxtOpnd));
      } else {
        ptrEle = true;
      }
    }
    if (ptrEle)
      return (!subEle) && (numOfCoors == 1);
    return subEle && (i >= numOfCoors);
  }

  /// Walk the abstract memory layout and determine if the path traverses any
  /// array types with unknown shape. Return true iff all the array types have
  /// a constant shape along the path.
  bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) const {
    const std::size_t sz = coors.size();
    std::size_t i = 0;
    for (; i < sz; ++i) {
      mlir::Value nxtOpnd = coors[i];
      if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
        if (fir::sequenceWithNonConstantShape(arrTy))
          return false;
        i += arrTy.getDimension() - 1;
        type = arrTy.getEleTy();
      } else if (auto strTy = type.dyn_cast<fir::RecordType>()) {
        type = strTy.getType(getFieldNumber(strTy, nxtOpnd));
      } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) {
        type = strTy.getType(getIntValue(nxtOpnd));
      } else {
        return true;
      }
    }
    return true;
  }

private:
  mlir::LogicalResult
  doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands,
               mlir::Location loc,
               mlir::ConversionPatternRewriter &rewriter) const {
    mlir::Type boxObjTy = coor.getBaseType();
    assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`");

    mlir::Value boxBaseAddr = operands[0];

    // 1. SPECIAL CASE (uses `fir.len_param_index`):
    //   %box = ... : !fir.box<!fir.type<derived{len1:i32}>>
    //   %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}>
    //   %addr = coordinate_of %box, %lenp
    if (coor.getNumOperands() == 2) {
      mlir::Operation *coordinateDef = (*coor.coor().begin()).getDefiningOp();
      if (isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef)) {
        TODO(loc,
             "fir.coordinate_of - fir.len_param_index is not supported yet");
      }
    }

    // 2. GENERAL CASE:
    // 2.1. (`fir.array`)
    //   %box = ... : !fir.box<!fir.array<?xU>>
    //   %idx = ... : index
    //   %resultAddr = coordinate_of %box, %idx : !fir.ref<U>
    // 2.2. (`fir.derived`)
    //   %box = ... : !fir.box<!fir.type<derived_type{field_1:i32}>>
    //   %idx = ... : i32
    //   %resultAddr = coordinate_of %box, %idx : !fir.ref<i32>
    // 2.3. (`fir.derived` inside `fir.array`)
    //   %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32,
    //   field_2:f32}>>>
    //   %idx1 = ... : index
    //   %idx2 = ... : i32
    //   %resultAddr = coordinate_of %box, %idx1, %idx2 : !fir.ref<f32>
    // 2.4. TODO: Either document or disable any other case that the following
    //      implementation might convert.
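    // For reference, a sketch of what case 2.1 lowers to for a rank-1 array
    // box (names illustrative; the actual ops are created below):
    //   %addr   = <load the base_addr field from the box>
    //   %stride = <load dims[0].stride from the box>
    //   %off    = llvm.mul %idx, %stride
    //   %raw    = llvm.getelementptr %addrAsI8Ptr[%off]
    //   %result = llvm.bitcast %raw to a pointer to the element type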
3074 mlir::LLVM::ConstantOp c0 = 3075 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 3076 mlir::Value resultAddr = 3077 loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()), 3078 boxBaseAddr, rewriter); 3079 auto currentObjTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy); 3080 mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext()); 3081 3082 for (unsigned i = 1, last = operands.size(); i < last; ++i) { 3083 if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) { 3084 if (i != 1) 3085 TODO(loc, "fir.array nested inside other array and/or derived type"); 3086 // Applies byte strides from the box. Ignore lower bound from box 3087 // since fir.coordinate_of indexes are zero based. Lowering takes care 3088 // of lower bound aspects. This both accounts for dynamically sized 3089 // types and non contiguous arrays. 3090 auto idxTy = lowerTy().indexType(); 3091 mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0); 3092 for (unsigned index = i, lastIndex = i + arrTy.getDimension(); 3093 index < lastIndex; ++index) { 3094 mlir::Value stride = 3095 loadStrideFromBox(loc, operands[0], index - i, rewriter); 3096 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, 3097 operands[index], stride); 3098 off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off); 3099 } 3100 auto voidPtrBase = 3101 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr); 3102 SmallVector<mlir::Value> args{off}; 3103 resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, 3104 voidPtrBase, args); 3105 i += arrTy.getDimension() - 1; 3106 currentObjTy = arrTy.getEleTy(); 3107 } else if (auto recTy = currentObjTy.dyn_cast<fir::RecordType>()) { 3108 auto recRefTy = 3109 mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy)); 3110 mlir::Value nxtOpnd = operands[i]; 3111 auto memObj = 3112 rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr); 3113 llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd}; 3114 currentObjTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 3115 auto llvmCurrentObjTy = lowerTy().convertType(currentObjTy); 3116 auto gep = rewriter.create<mlir::LLVM::GEPOp>( 3117 loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj, 3118 args); 3119 resultAddr = 3120 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep); 3121 } else { 3122 fir::emitFatalError(loc, "unexpected type in coordinate_of"); 3123 } 3124 } 3125 3126 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr); 3127 return success(); 3128 } 3129 3130 mlir::LogicalResult 3131 doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty, 3132 mlir::ValueRange operands, mlir::Location loc, 3133 mlir::ConversionPatternRewriter &rewriter) const { 3134 mlir::Type baseObjectTy = coor.getBaseType(); 3135 3136 mlir::Type currentObjTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 3137 bool hasSubdimension = hasSubDimensions(currentObjTy); 3138 bool columnIsDeferred = !hasSubdimension; 3139 3140 if (!supportedCoordinate(currentObjTy, operands.drop_front(1))) { 3141 TODO(loc, "unsupported combination of coordinate operands"); 3142 } 3143 3144 const bool hasKnownShape = 3145 arraysHaveKnownShape(currentObjTy, operands.drop_front(1)); 3146 3147 // If only the column is `?`, then we can simply place the column value in 3148 // the 0-th GEP position. 
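    // For instance (illustrative), an unboxed `!fir.array<10x?xi32>`
    // qualifies: only its last extent is deferred, so the leading extents
    // still provide constant strides for the GEP.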
3149 if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) { 3150 if (!hasKnownShape) { 3151 const unsigned sz = arrTy.getDimension(); 3152 if (arraysHaveKnownShape(arrTy.getEleTy(), 3153 operands.drop_front(1 + sz))) { 3154 llvm::ArrayRef<int64_t> shape = arrTy.getShape(); 3155 bool allConst = true; 3156 for (unsigned i = 0; i < sz - 1; ++i) { 3157 if (shape[i] < 0) { 3158 allConst = false; 3159 break; 3160 } 3161 } 3162 if (allConst) 3163 columnIsDeferred = true; 3164 } 3165 } 3166 } 3167 3168 if (fir::hasDynamicSize(fir::unwrapSequenceType(currentObjTy))) { 3169 mlir::emitError( 3170 loc, "fir.coordinate_of with a dynamic element size is unsupported"); 3171 return failure(); 3172 } 3173 3174 if (hasKnownShape || columnIsDeferred) { 3175 SmallVector<mlir::Value> offs; 3176 if (hasKnownShape && hasSubdimension) { 3177 mlir::LLVM::ConstantOp c0 = 3178 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 3179 offs.push_back(c0); 3180 } 3181 const std::size_t sz = operands.size(); 3182 Optional<int> dims; 3183 SmallVector<mlir::Value> arrIdx; 3184 for (std::size_t i = 1; i < sz; ++i) { 3185 mlir::Value nxtOpnd = operands[i]; 3186 3187 if (!currentObjTy) { 3188 mlir::emitError(loc, "invalid coordinate/check failed"); 3189 return failure(); 3190 } 3191 3192 // check if the i-th coordinate relates to an array 3193 if (dims.hasValue()) { 3194 arrIdx.push_back(nxtOpnd); 3195 int dimsLeft = *dims; 3196 if (dimsLeft > 1) { 3197 dims = dimsLeft - 1; 3198 continue; 3199 } 3200 currentObjTy = currentObjTy.cast<fir::SequenceType>().getEleTy(); 3201 // append array range in reverse (FIR arrays are column-major) 3202 offs.append(arrIdx.rbegin(), arrIdx.rend()); 3203 arrIdx.clear(); 3204 dims.reset(); 3205 continue; 3206 } 3207 if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) { 3208 int d = arrTy.getDimension() - 1; 3209 if (d > 0) { 3210 dims = d; 3211 arrIdx.push_back(nxtOpnd); 3212 continue; 3213 } 3214 currentObjTy = currentObjTy.cast<fir::SequenceType>().getEleTy(); 3215 offs.push_back(nxtOpnd); 3216 continue; 3217 } 3218 3219 // check if the i-th coordinate relates to a field 3220 if (auto recTy = currentObjTy.dyn_cast<fir::RecordType>()) 3221 currentObjTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 3222 else if (auto tupTy = currentObjTy.dyn_cast<mlir::TupleType>()) 3223 currentObjTy = tupTy.getType(getIntValue(nxtOpnd)); 3224 else 3225 currentObjTy = nullptr; 3226 3227 offs.push_back(nxtOpnd); 3228 } 3229 if (dims.hasValue()) 3230 offs.append(arrIdx.rbegin(), arrIdx.rend()); 3231 mlir::Value base = operands[0]; 3232 mlir::Value retval = genGEP(loc, ty, rewriter, base, offs); 3233 rewriter.replaceOp(coor, retval); 3234 return success(); 3235 } 3236 3237 mlir::emitError(loc, "fir.coordinate_of base operand has unsupported type"); 3238 return failure(); 3239 } 3240 }; 3241 3242 } // namespace 3243 3244 namespace { 3245 /// Convert FIR dialect to LLVM dialect 3246 /// 3247 /// This pass lowers all FIR dialect operations to LLVM IR dialect. An 3248 /// MLIR pass is used to lower residual Std dialect to LLVM IR dialect. 3249 /// 3250 /// This pass is not complete yet. We are upstreaming it in small patches. 
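///
/// A minimal usage sketch (hypothetical driver code; in practice the pass is
/// scheduled by the Flang lowering pipeline):
///
///   mlir::PassManager pm(&context);
///   pm.addPass(fir::createFIRToLLVMPass());
///   if (mlir::failed(pm.run(module)))
///     ...report the failure...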
3251 class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> { 3252 public: 3253 mlir::ModuleOp getModule() { return getOperation(); } 3254 3255 void runOnOperation() override final { 3256 auto mod = getModule(); 3257 if (!forcedTargetTriple.empty()) { 3258 fir::setTargetTriple(mod, forcedTargetTriple); 3259 } 3260 3261 auto *context = getModule().getContext(); 3262 fir::LLVMTypeConverter typeConverter{getModule()}; 3263 mlir::OwningRewritePatternList pattern(context); 3264 pattern.insert< 3265 AbsentOpConversion, AddcOpConversion, AddrOfOpConversion, 3266 AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion, 3267 BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion, 3268 BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion, 3269 BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeDescOpConversion, 3270 CallOpConversion, CmpcOpConversion, ConstcOpConversion, 3271 ConvertOpConversion, CoordinateOpConversion, DispatchOpConversion, 3272 DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion, 3273 EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion, 3274 ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion, 3275 FreeMemOpConversion, HasValueOpConversion, GenTypeDescOpConversion, 3276 GlobalLenOpConversion, GlobalOpConversion, InsertOnRangeOpConversion, 3277 InsertValueOpConversion, IsPresentOpConversion, 3278 LenParamIndexOpConversion, LoadOpConversion, NegcOpConversion, 3279 NoReassocOpConversion, MulcOpConversion, SelectCaseOpConversion, 3280 SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion, 3281 ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion, 3282 SliceOpConversion, StoreOpConversion, StringLitOpConversion, 3283 SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion, 3284 UndefOpConversion, UnreachableOpConversion, XArrayCoorOpConversion, 3285 XEmboxOpConversion, XReboxOpConversion, ZeroOpConversion>( 3286 typeConverter); 3287 mlir::populateStdToLLVMConversionPatterns(typeConverter, pattern); 3288 mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter, 3289 pattern); 3290 mlir::ConversionTarget target{*context}; 3291 target.addLegalDialect<mlir::LLVM::LLVMDialect>(); 3292 3293 // required NOPs for applying a full conversion 3294 target.addLegalOp<mlir::ModuleOp>(); 3295 3296 // apply the patterns 3297 if (mlir::failed(mlir::applyFullConversion(getModule(), target, 3298 std::move(pattern)))) { 3299 signalPassFailure(); 3300 } 3301 } 3302 }; 3303 } // namespace 3304 3305 std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() { 3306 return std::make_unique<FIRToLLVMLowering>(); 3307 } 3308