1 //===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/ 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "flang/Optimizer/CodeGen/CodeGen.h" 14 #include "CGOps.h" 15 #include "PassDetail.h" 16 #include "flang/ISO_Fortran_binding.h" 17 #include "flang/Optimizer/Dialect/FIRAttr.h" 18 #include "flang/Optimizer/Dialect/FIROps.h" 19 #include "flang/Optimizer/Support/TypeCode.h" 20 #include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h" 21 #include "mlir/Conversion/LLVMCommon/Pattern.h" 22 #include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h" 23 #include "mlir/IR/BuiltinTypes.h" 24 #include "mlir/IR/Matchers.h" 25 #include "mlir/Pass/Pass.h" 26 #include "mlir/Target/LLVMIR/ModuleTranslation.h" 27 #include "llvm/ADT/ArrayRef.h" 28 29 #define DEBUG_TYPE "flang-codegen" 30 31 // fir::LLVMTypeConverter for converting to LLVM IR dialect types. 32 #include "TypeConverter.h" 33 34 // TODO: This should really be recovered from the specified target. 35 static constexpr unsigned defaultAlign = 8; 36 37 /// `fir.box` attribute values as defined for CFI_attribute_t in 38 /// flang/ISO_Fortran_binding.h. 39 static constexpr unsigned kAttrPointer = CFI_attribute_pointer; 40 static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable; 41 42 static inline mlir::Type getVoidPtrType(mlir::MLIRContext *context) { 43 return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8)); 44 } 45 46 static mlir::LLVM::ConstantOp 47 genConstantIndex(mlir::Location loc, mlir::Type ity, 48 mlir::ConversionPatternRewriter &rewriter, 49 std::int64_t offset) { 50 auto cattr = rewriter.getI64IntegerAttr(offset); 51 return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr); 52 } 53 54 static Block *createBlock(mlir::ConversionPatternRewriter &rewriter, 55 mlir::Block *insertBefore) { 56 assert(insertBefore && "expected valid insertion block"); 57 return rewriter.createBlock(insertBefore->getParent(), 58 mlir::Region::iterator(insertBefore)); 59 } 60 61 namespace { 62 /// FIR conversion pattern template 63 template <typename FromOp> 64 class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> { 65 public: 66 explicit FIROpConversion(fir::LLVMTypeConverter &lowering) 67 : mlir::ConvertOpToLLVMPattern<FromOp>(lowering) {} 68 69 protected: 70 mlir::Type convertType(mlir::Type ty) const { 71 return lowerTy().convertType(ty); 72 } 73 mlir::Type voidPtrTy() const { return getVoidPtrType(); } 74 75 mlir::Type getVoidPtrType() const { 76 return mlir::LLVM::LLVMPointerType::get( 77 mlir::IntegerType::get(&lowerTy().getContext(), 8)); 78 } 79 80 mlir::LLVM::ConstantOp 81 genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter, 82 int value) const { 83 mlir::Type i32Ty = rewriter.getI32Type(); 84 mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value); 85 return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr); 86 } 87 88 mlir::LLVM::ConstantOp 89 genConstantOffset(mlir::Location loc, 90 mlir::ConversionPatternRewriter &rewriter, 91 int offset) const { 92 mlir::Type ity = lowerTy().offsetType(); 93 
    mlir::IntegerAttr cattr = rewriter.getI32IntegerAttr(offset);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
  }

  /// Construct code sequence to extract the specific value from a `fir.box`.
  mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box,
                              mlir::Type resultTy,
                              mlir::ConversionPatternRewriter &rewriter,
                              unsigned boxValue) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cValuePos =
        genConstantOffset(loc, rewriter, boxValue);
    auto pty = mlir::LLVM::LLVMPointerType::get(resultTy);
    auto p = rewriter.create<mlir::LLVM::GEPOp>(
        loc, pty, box, mlir::ValueRange{c0, cValuePos});
    return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p);
  }

  /// Method to construct code sequence to get the triple for dimension `dim`
  /// from a box.
  SmallVector<mlir::Value, 3>
  getDimsFromBox(mlir::Location loc, ArrayRef<mlir::Type> retTys,
                 mlir::Value box, mlir::Value dim,
                 mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cDims =
        genConstantOffset(loc, rewriter, kDimsPosInBox);
    mlir::LLVM::LoadOp l0 =
        loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter);
    mlir::LLVM::LoadOp l1 =
        loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter);
    mlir::LLVM::LoadOp l2 =
        loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter);
    return {l0.getResult(), l1.getResult(), l2.getResult()};
  }

  mlir::LLVM::LoadOp
  loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0,
                 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off,
                 mlir::Type ty,
                 mlir::ConversionPatternRewriter &rewriter) const {
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  mlir::Value
  loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim,
                    mlir::ConversionPatternRewriter &rewriter) const {
    auto idxTy = lowerTy().indexType();
    auto c0 = genConstantOffset(loc, rewriter, 0);
    auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox);
    auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim);
    return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy,
                          rewriter);
  }

  /// Read base address from a fir.box. Returned address has type ty.
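  ///
  /// As an illustrative sketch only (it assumes the base address lives at
  /// descriptor field index kAddrPosInBox, taken to be 0 here, and elides the
  /// exact pointer types, which come from the type converter), the generated
  /// LLVM dialect sequence looks roughly like:
  ///   %c0  = llvm.mlir.constant(0 : i32) : i32
  ///   %pos = llvm.mlir.constant(0 : i32) : i32   // kAddrPosInBox
  ///   %p   = llvm.getelementptr %box[%c0, %pos] : (...) -> !llvm.ptr<ty>
  ///   %a   = llvm.load %p : !llvm.ptr<ty>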
152 mlir::Value 153 loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box, 154 mlir::ConversionPatternRewriter &rewriter) const { 155 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 156 mlir::LLVM::ConstantOp cAddr = 157 genConstantOffset(loc, rewriter, kAddrPosInBox); 158 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 159 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr); 160 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 161 } 162 163 mlir::Value 164 loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box, 165 mlir::ConversionPatternRewriter &rewriter) const { 166 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 167 mlir::LLVM::ConstantOp cElemLen = 168 genConstantOffset(loc, rewriter, kElemLenPosInBox); 169 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 170 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen); 171 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 172 } 173 174 // Load the attribute from the \p box and perform a check against \p maskValue 175 // The final comparison is implemented as `(attribute & maskValue) != 0`. 176 mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box, 177 mlir::ConversionPatternRewriter &rewriter, 178 unsigned maskValue) const { 179 mlir::Type attrTy = rewriter.getI32Type(); 180 mlir::Value attribute = 181 getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox); 182 mlir::LLVM::ConstantOp attrMask = 183 genConstantOffset(loc, rewriter, maskValue); 184 auto maskRes = 185 rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask); 186 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 187 return rewriter.create<mlir::LLVM::ICmpOp>( 188 loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0); 189 } 190 191 // Get the element type given an LLVM type that is of the form 192 // [llvm.ptr](array|struct|vector)+ and the provided indexes. 193 static mlir::Type getBoxEleTy(mlir::Type type, 194 llvm::ArrayRef<unsigned> indexes) { 195 if (auto t = type.dyn_cast<mlir::LLVM::LLVMPointerType>()) 196 type = t.getElementType(); 197 for (auto i : indexes) { 198 if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) { 199 assert(!t.isOpaque() && i < t.getBody().size()); 200 type = t.getBody()[i]; 201 } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 202 type = t.getElementType(); 203 } else if (auto t = type.dyn_cast<mlir::VectorType>()) { 204 type = t.getElementType(); 205 } else { 206 fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()), 207 "request for invalid box element type"); 208 } 209 } 210 return type; 211 } 212 213 // Return LLVM type of the base address given the LLVM type 214 // of the related descriptor (lowered fir.box type). 215 static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) { 216 return getBoxEleTy(type, {kAddrPosInBox}); 217 } 218 219 template <typename... ARGS> 220 mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty, 221 mlir::ConversionPatternRewriter &rewriter, 222 mlir::Value base, ARGS... args) const { 223 SmallVector<mlir::Value> cv{args...}; 224 return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv); 225 } 226 227 /// Perform an extension or truncation as needed on an integer value. Lowering 228 /// to the specific target may involve some sign-extending or truncation of 229 /// values, particularly to fit them from abstract box types to the 230 /// appropriate reified structures. 
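  ///
  /// For example (a sketch, not tied to a particular target): widening an i32
  /// length parameter to the i64 index type emits `llvm.sext %val : i32 to
  /// i64`, narrowing emits `llvm.trunc`, and a value that already has the
  /// requested width is returned unchanged.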
231 mlir::Value integerCast(mlir::Location loc, 232 mlir::ConversionPatternRewriter &rewriter, 233 mlir::Type ty, mlir::Value val) const { 234 auto valTy = val.getType(); 235 // If the value was not yet lowered, lower its type so that it can 236 // be used in getPrimitiveTypeSizeInBits. 237 if (!valTy.isa<mlir::IntegerType>()) 238 valTy = convertType(valTy); 239 auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty); 240 auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy); 241 if (toSize < fromSize) 242 return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val); 243 if (toSize > fromSize) 244 return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val); 245 return val; 246 } 247 248 fir::LLVMTypeConverter &lowerTy() const { 249 return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter()); 250 } 251 }; 252 253 /// FIR conversion pattern template 254 template <typename FromOp> 255 class FIROpAndTypeConversion : public FIROpConversion<FromOp> { 256 public: 257 using FIROpConversion<FromOp>::FIROpConversion; 258 using OpAdaptor = typename FromOp::Adaptor; 259 260 mlir::LogicalResult 261 matchAndRewrite(FromOp op, OpAdaptor adaptor, 262 mlir::ConversionPatternRewriter &rewriter) const final { 263 mlir::Type ty = this->convertType(op.getType()); 264 return doRewrite(op, ty, adaptor, rewriter); 265 } 266 267 virtual mlir::LogicalResult 268 doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor, 269 mlir::ConversionPatternRewriter &rewriter) const = 0; 270 }; 271 272 /// Create value signaling an absent optional argument in a call, e.g. 273 /// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>` 274 struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> { 275 using FIROpConversion::FIROpConversion; 276 277 mlir::LogicalResult 278 matchAndRewrite(fir::AbsentOp absent, OpAdaptor, 279 mlir::ConversionPatternRewriter &rewriter) const override { 280 mlir::Type ty = convertType(absent.getType()); 281 mlir::Location loc = absent.getLoc(); 282 283 if (absent.getType().isa<fir::BoxCharType>()) { 284 auto structTy = ty.cast<mlir::LLVM::LLVMStructType>(); 285 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 286 auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 287 auto nullField = 288 rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]); 289 mlir::MLIRContext *ctx = absent.getContext(); 290 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 291 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 292 absent, ty, undefStruct, nullField, c0); 293 } else { 294 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty); 295 } 296 return success(); 297 } 298 }; 299 300 // Lower `fir.address_of` operation to `llvm.address_of` operation. 301 struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> { 302 using FIROpConversion::FIROpConversion; 303 304 mlir::LogicalResult 305 matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor, 306 mlir::ConversionPatternRewriter &rewriter) const override { 307 auto ty = convertType(addr.getType()); 308 rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>( 309 addr, ty, addr.symbol().getRootReference().getValue()); 310 return success(); 311 } 312 }; 313 } // namespace 314 315 /// Lookup the function to compute the memory size of this parametric derived 316 /// type. The size of the object may depend on the LEN type parameters of the 317 /// derived type. 
318 static mlir::LLVM::LLVMFuncOp 319 getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op, 320 mlir::ConversionPatternRewriter &rewriter) { 321 auto module = op->getParentOfType<mlir::ModuleOp>(); 322 std::string name = recTy.getName().str() + "P.mem.size"; 323 return module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name); 324 } 325 326 namespace { 327 /// convert to LLVM IR dialect `alloca` 328 struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> { 329 using FIROpConversion::FIROpConversion; 330 331 mlir::LogicalResult 332 matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor, 333 mlir::ConversionPatternRewriter &rewriter) const override { 334 mlir::ValueRange operands = adaptor.getOperands(); 335 auto loc = alloc.getLoc(); 336 mlir::Type ity = lowerTy().indexType(); 337 unsigned i = 0; 338 mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult(); 339 mlir::Type ty = convertType(alloc.getType()); 340 mlir::Type resultTy = ty; 341 if (alloc.hasLenParams()) { 342 unsigned end = alloc.numLenParams(); 343 llvm::SmallVector<mlir::Value> lenParams; 344 for (; i < end; ++i) 345 lenParams.push_back(operands[i]); 346 mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType()); 347 if (auto chrTy = scalarType.dyn_cast<fir::CharacterType>()) { 348 fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen( 349 chrTy.getContext(), chrTy.getFKind()); 350 ty = mlir::LLVM::LLVMPointerType::get(convertType(rawCharTy)); 351 assert(end == 1); 352 size = integerCast(loc, rewriter, ity, lenParams[0]); 353 } else if (auto recTy = scalarType.dyn_cast<fir::RecordType>()) { 354 mlir::LLVM::LLVMFuncOp memSizeFn = 355 getDependentTypeMemSizeFn(recTy, alloc, rewriter); 356 if (!memSizeFn) 357 emitError(loc, "did not find allocation function"); 358 mlir::NamedAttribute attr = rewriter.getNamedAttr( 359 "callee", mlir::SymbolRefAttr::get(memSizeFn)); 360 auto call = rewriter.create<mlir::LLVM::CallOp>( 361 loc, ity, lenParams, llvm::ArrayRef<mlir::NamedAttribute>{attr}); 362 size = call.getResult(0); 363 ty = mlir::LLVM::LLVMPointerType::get( 364 mlir::IntegerType::get(alloc.getContext(), 8)); 365 } else { 366 return emitError(loc, "unexpected type ") 367 << scalarType << " with type parameters"; 368 } 369 } 370 if (alloc.hasShapeOperands()) { 371 mlir::Type allocEleTy = fir::unwrapRefType(alloc.getType()); 372 // Scale the size by constant factors encoded in the array type. 373 // We only do this for arrays that don't have a constant interior, since 374 // those are the only ones that get decayed to a pointer to the element 375 // type. 376 if (auto seqTy = allocEleTy.dyn_cast<fir::SequenceType>()) { 377 if (!seqTy.hasConstantInterior()) { 378 fir::SequenceType::Extent constSize = 1; 379 for (auto extent : seqTy.getShape()) 380 if (extent != fir::SequenceType::getUnknownExtent()) 381 constSize *= extent; 382 mlir::Value constVal{ 383 genConstantIndex(loc, ity, rewriter, constSize).getResult()}; 384 size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, constVal); 385 } 386 } 387 unsigned end = operands.size(); 388 for (; i < end; ++i) 389 size = rewriter.create<mlir::LLVM::MulOp>( 390 loc, ity, size, integerCast(loc, rewriter, ity, operands[i])); 391 } 392 if (ty == resultTy) { 393 // Do not emit the bitcast if ty and resultTy are the same. 
      rewriter.replaceOpWithNewOp<mlir::LLVM::AllocaOp>(alloc, ty, size,
                                                        alloc->getAttrs());
    } else {
      auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, ty, size,
                                                      alloc->getAttrs());
      rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(alloc, resultTy, al);
    }
    return success();
  }
};

/// Lower `fir.box_addr` to the sequence of operations to extract the first
/// element of the box.
struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxaddr.getLoc();
    mlir::Type ty = convertType(boxaddr.getType());
    if (auto argty = boxaddr.val().getType().dyn_cast<fir::BoxType>()) {
      rewriter.replaceOp(boxaddr, loadBaseAddrFromBox(loc, ty, a, rewriter));
    } else {
      auto c0attr = rewriter.getI32IntegerAttr(0);
      auto c0 = mlir::ArrayAttr::get(boxaddr.getContext(), c0attr);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, ty, a,
                                                              c0);
    }
    return success();
  }
};

/// Lower `fir.box_dims` to a sequence of operations to extract the requested
/// dimension information from the boxed value.
/// Results in a triple set of GEPs and loads.
struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    SmallVector<mlir::Type, 3> resultTypes = {
        convertType(boxdims.getResult(0).getType()),
        convertType(boxdims.getResult(1).getType()),
        convertType(boxdims.getResult(2).getType()),
    };
    auto results =
        getDimsFromBox(boxdims.getLoc(), resultTypes, adaptor.getOperands()[0],
                       adaptor.getOperands()[1], rewriter);
    rewriter.replaceOp(boxdims, results);
    return success();
  }
};

/// Lower `fir.box_elesize` to a sequence of operations to extract the size of
/// an element in the boxed value.
struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxelesz.getLoc();
    auto ty = convertType(boxelesz.getType());
    auto elemSize = getValueFromBox(loc, a, ty, rewriter, kElemLenPosInBox);
    rewriter.replaceOp(boxelesz, elemSize);
    return success();
  }
};

/// Lower `fir.box_isalloc` to a sequence of operations to determine if the
/// boxed value was from an ALLOCATABLE entity.
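///
/// As a sketch of the intent (field index and mask values are the CFI_*
/// constants from flang/ISO_Fortran_binding.h; the exact IR depends on the
/// type converter): the descriptor's attribute field is loaded and tested
/// with `(attribute & CFI_attribute_allocatable) != 0`, yielding an i1.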
struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisalloc.getLoc();
    mlir::Value check =
        genBoxAttributeCheck(loc, box, rewriter, kAttrAllocatable);
    rewriter.replaceOp(boxisalloc, check);
    return success();
  }
};

/// Lower `fir.box_isarray` to a sequence of operations to determine if the
/// boxed value is an array.
struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxisarray.getLoc();
    auto rank =
        getValueFromBox(loc, a, rewriter.getI32Type(), rewriter, kRankPosInBox);
    auto c0 = genConstantOffset(loc, rewriter, 0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
        boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0);
    return success();
  }
};

/// Lower `fir.box_isptr` to a sequence of operations to determine if the
/// boxed value was from a POINTER entity.
struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisptr.getLoc();
    mlir::Value check = genBoxAttributeCheck(loc, box, rewriter, kAttrPointer);
    rewriter.replaceOp(boxisptr, check);
    return success();
  }
};

/// Lower `fir.box_rank` to the sequence of operations to extract the rank
/// from the box.
struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxrank.getLoc();
    mlir::Type ty = convertType(boxrank.getType());
    auto result = getValueFromBox(loc, a, ty, rewriter, kRankPosInBox);
    rewriter.replaceOp(boxrank, result);
    return success();
  }
};

/// Lower `fir.string_lit` to LLVM IR dialect operation.
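///
/// Two forms are handled, sketched here with illustrative types only: a
/// `mlir::StringAttr` value becomes a single `llvm.mlir.constant`, while an
/// `mlir::ArrayAttr` of character codes is materialized as a dense vector
/// constant, e.g. `arith.constant dense<[72, 73]> : vector<2xi8>` for a
/// KIND=1 literal.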
537 struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> { 538 using FIROpConversion::FIROpConversion; 539 540 mlir::LogicalResult 541 matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor, 542 mlir::ConversionPatternRewriter &rewriter) const override { 543 auto ty = convertType(constop.getType()); 544 auto attr = constop.getValue(); 545 if (attr.isa<mlir::StringAttr>()) { 546 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr); 547 return success(); 548 } 549 550 auto arr = attr.cast<mlir::ArrayAttr>(); 551 auto charTy = constop.getType().cast<fir::CharacterType>(); 552 unsigned bits = lowerTy().characterBitsize(charTy); 553 mlir::Type intTy = rewriter.getIntegerType(bits); 554 auto attrs = llvm::map_range( 555 arr.getValue(), [intTy, bits](mlir::Attribute attr) -> Attribute { 556 return mlir::IntegerAttr::get( 557 intTy, 558 attr.cast<mlir::IntegerAttr>().getValue().sextOrTrunc(bits)); 559 }); 560 mlir::Type vecType = mlir::VectorType::get(arr.size(), intTy); 561 auto denseAttr = mlir::DenseElementsAttr::get( 562 vecType.cast<mlir::ShapedType>(), llvm::to_vector<8>(attrs)); 563 rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>(constop, ty, 564 denseAttr); 565 return success(); 566 } 567 }; 568 569 /// Lower `fir.boxproc_host` operation. Extracts the host pointer from the 570 /// boxproc. 571 /// TODO: Part of supporting Fortran 2003 procedure pointers. 572 struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> { 573 using FIROpConversion::FIROpConversion; 574 575 mlir::LogicalResult 576 matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor, 577 mlir::ConversionPatternRewriter &rewriter) const override { 578 TODO(boxprochost.getLoc(), "fir.boxproc_host codegen"); 579 return failure(); 580 } 581 }; 582 583 /// Lower `fir.box_tdesc` to the sequence of operations to extract the type 584 /// descriptor from the box. 
struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxtypedesc.getLoc();
    mlir::Type typeTy =
        fir::getDescFieldTypeModel<kTypePosInBox>()(boxtypedesc.getContext());
    auto result = getValueFromBox(loc, box, typeTy, rewriter, kTypePosInBox);
    auto typePtrTy = mlir::LLVM::LLVMPointerType::get(typeTy);
    rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(boxtypedesc, typePtrTy,
                                                        result);
    return success();
  }
};

// `fir.call` -> `llvm.call`
struct CallOpConversion : public FIROpConversion<fir::CallOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CallOp call, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    SmallVector<mlir::Type> resultTys;
    for (auto r : call.getResults())
      resultTys.push_back(convertType(r.getType()));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        call, resultTys, adaptor.getOperands(), call->getAttrs());
    return success();
  }
};
} // namespace

static mlir::Type getComplexEleTy(mlir::Type complex) {
  if (auto cc = complex.dyn_cast<mlir::ComplexType>())
    return cc.getElementType();
  return complex.cast<fir::ComplexType>().getElementType();
}

namespace {
/// Compare complex values
///
/// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une).
///
/// For completeness, all other comparisons are done on the real component
/// only.
struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    mlir::MLIRContext *ctxt = cmp.getContext();
    mlir::Type eleTy = convertType(getComplexEleTy(cmp.lhs().getType()));
    mlir::Type resTy = convertType(cmp.getType());
    mlir::Location loc = cmp.getLoc();
    auto pos0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
    SmallVector<mlir::Value, 2> rp{
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0],
                                                    pos0),
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1],
                                                    pos0)};
    auto rcp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, rp, cmp->getAttrs());
    auto pos1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
    SmallVector<mlir::Value, 2> ip{
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0],
                                                    pos1),
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1],
                                                    pos1)};
    auto icp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, ip, cmp->getAttrs());
    SmallVector<mlir::Value, 2> cp{rcp, icp};
    switch (cmp.getPredicate()) {
    case mlir::arith::CmpFPredicate::OEQ: // .EQ.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp);
      break;
    case mlir::arith::CmpFPredicate::UNE: // .NE.
663 rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp); 664 break; 665 default: 666 rewriter.replaceOp(cmp, rcp.getResult()); 667 break; 668 } 669 return success(); 670 } 671 }; 672 673 /// Lower complex constants 674 struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> { 675 using FIROpConversion::FIROpConversion; 676 677 mlir::LogicalResult 678 matchAndRewrite(fir::ConstcOp conc, OpAdaptor, 679 mlir::ConversionPatternRewriter &rewriter) const override { 680 mlir::Location loc = conc.getLoc(); 681 mlir::MLIRContext *ctx = conc.getContext(); 682 mlir::Type ty = convertType(conc.getType()); 683 mlir::Type ety = convertType(getComplexEleTy(conc.getType())); 684 auto realFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getReal())); 685 auto realPart = 686 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, realFloatAttr); 687 auto imFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getImaginary())); 688 auto imPart = 689 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, imFloatAttr); 690 auto realIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 691 auto imIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 692 auto undef = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 693 auto setReal = rewriter.create<mlir::LLVM::InsertValueOp>( 694 loc, ty, undef, realPart, realIndex); 695 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(conc, ty, setReal, 696 imPart, imIndex); 697 return success(); 698 } 699 700 inline APFloat getValue(mlir::Attribute attr) const { 701 return attr.cast<fir::RealAttr>().getValue(); 702 } 703 }; 704 705 /// convert value of from-type to value of to-type 706 struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> { 707 using FIROpConversion::FIROpConversion; 708 709 static bool isFloatingPointTy(mlir::Type ty) { 710 return ty.isa<mlir::FloatType>(); 711 } 712 713 mlir::LogicalResult 714 matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor, 715 mlir::ConversionPatternRewriter &rewriter) const override { 716 auto fromTy = convertType(convert.value().getType()); 717 auto toTy = convertType(convert.res().getType()); 718 mlir::Value op0 = adaptor.getOperands()[0]; 719 if (fromTy == toTy) { 720 rewriter.replaceOp(convert, op0); 721 return success(); 722 } 723 auto loc = convert.getLoc(); 724 auto convertFpToFp = [&](mlir::Value val, unsigned fromBits, 725 unsigned toBits, mlir::Type toTy) -> mlir::Value { 726 if (fromBits == toBits) { 727 // TODO: Converting between two floating-point representations with the 728 // same bitwidth is not allowed for now. 729 mlir::emitError(loc, 730 "cannot implicitly convert between two floating-point " 731 "representations of the same bitwidth"); 732 return {}; 733 } 734 if (fromBits > toBits) 735 return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val); 736 return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val); 737 }; 738 // Complex to complex conversion. 739 if (fir::isa_complex(convert.value().getType()) && 740 fir::isa_complex(convert.res().getType())) { 741 // Special case: handle the conversion of a complex such that both the 742 // real and imaginary parts are converted together. 
743 auto zero = mlir::ArrayAttr::get(convert.getContext(), 744 rewriter.getI32IntegerAttr(0)); 745 auto one = mlir::ArrayAttr::get(convert.getContext(), 746 rewriter.getI32IntegerAttr(1)); 747 auto ty = convertType(getComplexEleTy(convert.value().getType())); 748 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, zero); 749 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, one); 750 auto nt = convertType(getComplexEleTy(convert.res().getType())); 751 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty); 752 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt); 753 auto rc = convertFpToFp(rp, fromBits, toBits, nt); 754 auto ic = convertFpToFp(ip, fromBits, toBits, nt); 755 auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy); 756 auto i1 = 757 rewriter.create<mlir::LLVM::InsertValueOp>(loc, toTy, un, rc, zero); 758 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, toTy, i1, 759 ic, one); 760 return mlir::success(); 761 } 762 // Floating point to floating point conversion. 763 if (isFloatingPointTy(fromTy)) { 764 if (isFloatingPointTy(toTy)) { 765 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy); 766 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy); 767 auto v = convertFpToFp(op0, fromBits, toBits, toTy); 768 rewriter.replaceOp(convert, v); 769 return mlir::success(); 770 } 771 if (toTy.isa<mlir::IntegerType>()) { 772 rewriter.replaceOpWithNewOp<mlir::LLVM::FPToSIOp>(convert, toTy, op0); 773 return mlir::success(); 774 } 775 } else if (fromTy.isa<mlir::IntegerType>()) { 776 // Integer to integer conversion. 777 if (toTy.isa<mlir::IntegerType>()) { 778 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy); 779 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy); 780 assert(fromBits != toBits); 781 if (fromBits > toBits) { 782 rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0); 783 return mlir::success(); 784 } 785 rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0); 786 return mlir::success(); 787 } 788 // Integer to floating point conversion. 789 if (isFloatingPointTy(toTy)) { 790 rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0); 791 return mlir::success(); 792 } 793 // Integer to pointer conversion. 794 if (toTy.isa<mlir::LLVM::LLVMPointerType>()) { 795 rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0); 796 return mlir::success(); 797 } 798 } else if (fromTy.isa<mlir::LLVM::LLVMPointerType>()) { 799 // Pointer to integer conversion. 800 if (toTy.isa<mlir::IntegerType>()) { 801 rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0); 802 return mlir::success(); 803 } 804 // Pointer to pointer conversion. 805 if (toTy.isa<mlir::LLVM::LLVMPointerType>()) { 806 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0); 807 return mlir::success(); 808 } 809 } 810 return emitError(loc) << "cannot convert " << fromTy << " to " << toTy; 811 } 812 }; 813 814 /// Lower `fir.dispatch` operation. A virtual call to a method in a dispatch 815 /// table. 816 struct DispatchOpConversion : public FIROpConversion<fir::DispatchOp> { 817 using FIROpConversion::FIROpConversion; 818 819 mlir::LogicalResult 820 matchAndRewrite(fir::DispatchOp dispatch, OpAdaptor adaptor, 821 mlir::ConversionPatternRewriter &rewriter) const override { 822 TODO(dispatch.getLoc(), "fir.dispatch codegen"); 823 return failure(); 824 } 825 }; 826 827 /// Lower `fir.dispatch_table` operation. 
The dispatch table for a Fortran 828 /// derived type. 829 struct DispatchTableOpConversion 830 : public FIROpConversion<fir::DispatchTableOp> { 831 using FIROpConversion::FIROpConversion; 832 833 mlir::LogicalResult 834 matchAndRewrite(fir::DispatchTableOp dispTab, OpAdaptor adaptor, 835 mlir::ConversionPatternRewriter &rewriter) const override { 836 TODO(dispTab.getLoc(), "fir.dispatch_table codegen"); 837 return failure(); 838 } 839 }; 840 841 /// Lower `fir.dt_entry` operation. An entry in a dispatch table; binds a 842 /// method-name to a function. 843 struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> { 844 using FIROpConversion::FIROpConversion; 845 846 mlir::LogicalResult 847 matchAndRewrite(fir::DTEntryOp dtEnt, OpAdaptor adaptor, 848 mlir::ConversionPatternRewriter &rewriter) const override { 849 TODO(dtEnt.getLoc(), "fir.dt_entry codegen"); 850 return failure(); 851 } 852 }; 853 854 /// Lower `fir.global_len` operation. 855 struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> { 856 using FIROpConversion::FIROpConversion; 857 858 mlir::LogicalResult 859 matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor, 860 mlir::ConversionPatternRewriter &rewriter) const override { 861 TODO(globalLen.getLoc(), "fir.global_len codegen"); 862 return failure(); 863 } 864 }; 865 866 /// Lower fir.len_param_index 867 struct LenParamIndexOpConversion 868 : public FIROpConversion<fir::LenParamIndexOp> { 869 using FIROpConversion::FIROpConversion; 870 871 // FIXME: this should be specialized by the runtime target 872 mlir::LogicalResult 873 matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor, 874 mlir::ConversionPatternRewriter &rewriter) const override { 875 TODO(lenp.getLoc(), "fir.len_param_index codegen"); 876 } 877 }; 878 879 /// Lower `fir.gentypedesc` to a global constant. 880 struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> { 881 using FIROpConversion::FIROpConversion; 882 883 mlir::LogicalResult 884 matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor, 885 mlir::ConversionPatternRewriter &rewriter) const override { 886 TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen"); 887 return failure(); 888 } 889 }; 890 } // namespace 891 892 /// Return the LLVMFuncOp corresponding to the standard malloc call. 893 static mlir::LLVM::LLVMFuncOp 894 getMalloc(fir::AllocMemOp op, mlir::ConversionPatternRewriter &rewriter) { 895 auto module = op->getParentOfType<mlir::ModuleOp>(); 896 if (mlir::LLVM::LLVMFuncOp mallocFunc = 897 module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("malloc")) 898 return mallocFunc; 899 mlir::OpBuilder moduleBuilder( 900 op->getParentOfType<mlir::ModuleOp>().getBodyRegion()); 901 auto indexType = mlir::IntegerType::get(op.getContext(), 64); 902 return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>( 903 rewriter.getUnknownLoc(), "malloc", 904 mlir::LLVM::LLVMFunctionType::get(getVoidPtrType(op.getContext()), 905 indexType, 906 /*isVarArg=*/false)); 907 } 908 909 /// Helper function for generating the LLVM IR that computes the size 910 /// in bytes for a derived type. 
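/// This relies on the usual null-pointer GEP idiom; schematically:
///   %null = llvm.mlir.null : !llvm.ptr<T>
///   %gep  = llvm.getelementptr %null[%one] : (!llvm.ptr<T>, i64) -> !llvm.ptr<T>
///   %size = llvm.ptrtoint %gep : !llvm.ptr<T> to i64
/// The "address" of element one relative to null is the size of T in bytes
/// (a sketch; the pointer and index types are supplied by the caller).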
911 static mlir::Value 912 computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy, 913 mlir::ConversionPatternRewriter &rewriter) { 914 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 915 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 916 llvm::SmallVector<mlir::Value> args{one}; 917 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args); 918 return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep); 919 } 920 921 namespace { 922 /// Lower a `fir.allocmem` instruction into `llvm.call @malloc` 923 struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> { 924 using FIROpConversion::FIROpConversion; 925 926 mlir::LogicalResult 927 matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor, 928 mlir::ConversionPatternRewriter &rewriter) const override { 929 mlir::Type ty = convertType(heap.getType()); 930 mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter); 931 mlir::Location loc = heap.getLoc(); 932 auto ity = lowerTy().indexType(); 933 if (auto recTy = fir::unwrapSequenceType(heap.getAllocatedType()) 934 .dyn_cast<fir::RecordType>()) 935 if (recTy.getNumLenParams() != 0) { 936 TODO(loc, 937 "fir.allocmem codegen of derived type with length parameters"); 938 return failure(); 939 } 940 mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty); 941 for (mlir::Value opnd : adaptor.getOperands()) 942 size = rewriter.create<mlir::LLVM::MulOp>( 943 loc, ity, size, integerCast(loc, rewriter, ity, opnd)); 944 heap->setAttr("callee", mlir::SymbolRefAttr::get(mallocFunc)); 945 auto malloc = rewriter.create<mlir::LLVM::CallOp>( 946 loc, ::getVoidPtrType(heap.getContext()), size, heap->getAttrs()); 947 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(heap, ty, 948 malloc.getResult(0)); 949 return success(); 950 } 951 952 // Compute the (allocation) size of the allocmem type in bytes. 953 mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy, 954 mlir::ConversionPatternRewriter &rewriter, 955 mlir::Type llTy) const { 956 // Use the primitive size, if available. 957 auto ptrTy = llTy.dyn_cast<mlir::LLVM::LLVMPointerType>(); 958 if (auto size = 959 mlir::LLVM::getPrimitiveTypeSizeInBits(ptrTy.getElementType())) 960 return genConstantIndex(loc, idxTy, rewriter, size / 8); 961 962 // Otherwise, generate the GEP trick in LLVM IR to compute the size. 963 return computeDerivedTypeSize(loc, ptrTy, idxTy, rewriter); 964 } 965 }; 966 } // namespace 967 968 /// Return the LLVMFuncOp corresponding to the standard free call. 
969 static mlir::LLVM::LLVMFuncOp 970 getFree(fir::FreeMemOp op, mlir::ConversionPatternRewriter &rewriter) { 971 auto module = op->getParentOfType<mlir::ModuleOp>(); 972 if (mlir::LLVM::LLVMFuncOp freeFunc = 973 module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("free")) 974 return freeFunc; 975 mlir::OpBuilder moduleBuilder(module.getBodyRegion()); 976 auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext()); 977 return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>( 978 rewriter.getUnknownLoc(), "free", 979 mlir::LLVM::LLVMFunctionType::get(voidType, 980 getVoidPtrType(op.getContext()), 981 /*isVarArg=*/false)); 982 } 983 984 namespace { 985 /// Lower a `fir.freemem` instruction into `llvm.call @free` 986 struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> { 987 using FIROpConversion::FIROpConversion; 988 989 mlir::LogicalResult 990 matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor, 991 mlir::ConversionPatternRewriter &rewriter) const override { 992 mlir::LLVM::LLVMFuncOp freeFunc = getFree(freemem, rewriter); 993 mlir::Location loc = freemem.getLoc(); 994 auto bitcast = rewriter.create<mlir::LLVM::BitcastOp>( 995 freemem.getLoc(), voidPtrTy(), adaptor.getOperands()[0]); 996 freemem->setAttr("callee", mlir::SymbolRefAttr::get(freeFunc)); 997 rewriter.create<mlir::LLVM::CallOp>( 998 loc, mlir::TypeRange{}, mlir::ValueRange{bitcast}, freemem->getAttrs()); 999 rewriter.eraseOp(freemem); 1000 return success(); 1001 } 1002 }; 1003 1004 /// Convert `fir.end` 1005 struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> { 1006 using FIROpConversion::FIROpConversion; 1007 1008 mlir::LogicalResult 1009 matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor, 1010 mlir::ConversionPatternRewriter &rewriter) const override { 1011 TODO(firEnd.getLoc(), "fir.end codegen"); 1012 return failure(); 1013 } 1014 }; 1015 1016 /// Lower `fir.has_value` operation to `llvm.return` operation. 1017 struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> { 1018 using FIROpConversion::FIROpConversion; 1019 1020 mlir::LogicalResult 1021 matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor, 1022 mlir::ConversionPatternRewriter &rewriter) const override { 1023 rewriter.replaceOpWithNewOp<LLVM::ReturnOp>(op, adaptor.getOperands()); 1024 return success(); 1025 } 1026 }; 1027 1028 /// Lower `fir.global` operation to `llvm.global` operation. 1029 /// `fir.insert_on_range` operations are replaced with constant dense attribute 1030 /// if they are applied on the full range. 
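///
/// As a sketch (the FIR initializer region is elided), a global array whose
/// single `fir.insert_on_range` covers every element ends up as an
/// `llvm.mlir.global` whose initializer region simply yields one value such
/// as `arith.constant dense<0> : vector<2xi32>`.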
struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto tyAttr = convertType(global.getType());
    if (global.getType().isa<fir::BoxType>())
      tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType();
    auto loc = global.getLoc();
    mlir::Attribute initAttr{};
    if (global.initVal())
      initAttr = global.initVal().getValue();
    auto linkage = convertLinkage(global.linkName());
    auto isConst = global.constant().hasValue();
    auto g = rewriter.create<mlir::LLVM::GlobalOp>(
        loc, tyAttr, isConst, linkage, global.getSymName(), initAttr);
    auto &gr = g.getInitializerRegion();
    rewriter.inlineRegionBefore(global.region(), gr, gr.end());
    if (!gr.empty()) {
      // Replace insert_on_range with a constant dense attribute if the
      // initialization is on the full range.
      auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>();
      for (auto insertOp : insertOnRangeOps) {
        if (isFullRange(insertOp.coor(), insertOp.getType())) {
          auto seqTyAttr = convertType(insertOp.getType());
          auto *op = insertOp.val().getDefiningOp();
          auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op);
          if (!constant) {
            auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op);
            if (!convertOp)
              continue;
            constant = cast<mlir::arith::ConstantOp>(
                convertOp.value().getDefiningOp());
          }
          mlir::Type vecType = mlir::VectorType::get(
              insertOp.getType().getShape(), constant.getType());
          auto denseAttr = mlir::DenseElementsAttr::get(
              vecType.cast<ShapedType>(), constant.getValue());
          rewriter.setInsertionPointAfter(insertOp);
          rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>(
              insertOp, seqTyAttr, denseAttr);
        }
      }
    }
    rewriter.eraseOp(global);
    return success();
  }

  bool isFullRange(mlir::DenseIntElementsAttr indexes,
                   fir::SequenceType seqTy) const {
    auto extents = seqTy.getShape();
    if (indexes.size() / 2 != static_cast<int64_t>(extents.size()))
      return false;
    auto cur_index = indexes.value_begin<int64_t>();
    for (unsigned i = 0; i < indexes.size(); i += 2) {
      if (*(cur_index++) != 0)
        return false;
      if (*(cur_index++) != extents[i / 2] - 1)
        return false;
    }
    return true;
  }

  // TODO: String comparison should be avoided. Replace linkName with an
  // enumeration.
  mlir::LLVM::Linkage convertLinkage(Optional<StringRef> optLinkage) const {
    if (optLinkage.hasValue()) {
      auto name = optLinkage.getValue();
      if (name == "internal")
        return mlir::LLVM::Linkage::Internal;
      if (name == "linkonce")
        return mlir::LLVM::Linkage::Linkonce;
      if (name == "common")
        return mlir::LLVM::Linkage::Common;
      if (name == "weak")
        return mlir::LLVM::Linkage::Weak;
    }
    return mlir::LLVM::Linkage::External;
  }
};
} // namespace

static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest,
                        Optional<mlir::ValueRange> destOps,
                        mlir::ConversionPatternRewriter &rewriter,
                        mlir::Block *newBlock) {
  if (destOps.hasValue())
    rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(),
                                          newBlock, mlir::ValueRange());
  else
    rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock);
}

template <typename A, typename B>
static void genBrOp(A caseOp, mlir::Block *dest, Optional<B> destOps,
                    mlir::ConversionPatternRewriter &rewriter) {
  if (destOps.hasValue())
    rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(),
                                                  dest);
  else
    rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest);
}

static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp,
                              mlir::Block *dest,
                              Optional<mlir::ValueRange> destOps,
                              mlir::ConversionPatternRewriter &rewriter) {
  auto *thisBlock = rewriter.getInsertionBlock();
  auto *newBlock = createBlock(rewriter, dest);
  rewriter.setInsertionPointToEnd(thisBlock);
  genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock);
  rewriter.setInsertionPointToEnd(newBlock);
}

namespace {
/// Conversion of `fir.select_case`
///
/// The `fir.select_case` operation is converted to an if-then-else ladder.
/// Depending on the case condition type, one or several comparisons and
/// conditional branches can be generated.
///
/// A point value case such as `case(4)`, a lower bound case such as
/// `case(5:)`, or an upper bound case such as `case(:3)` is converted to a
/// simple comparison between the selector value and the constant value in the
/// case. The block associated with the case condition is then executed if
/// the comparison succeeds; otherwise, control branches to the next block with
/// the comparison for the next case condition.
///
/// A closed interval case condition such as `case(7:10)` is converted with a
/// first comparison and conditional branch for the lower bound. If
/// successful, it branches to a second block with the comparison for the
/// upper bound of the same case condition.
///
/// TODO: lowering of CHARACTER type cases is not handled yet.
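///
/// Sketch of the ladder generated for a closed interval (block names are
/// illustrative, and the integer type depends on the selector):
///   %lbOk = llvm.icmp "sle" %lb, %selector : i32
///   llvm.cond_br %lbOk, ^checkUpper, ^nextCase
/// ^checkUpper:
///   %ubOk = llvm.icmp "sle" %selector, %ub : i32
///   llvm.cond_br %ubOk, ^caseBlock, ^nextCase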
1166 struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> { 1167 using FIROpConversion::FIROpConversion; 1168 1169 mlir::LogicalResult 1170 matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor, 1171 mlir::ConversionPatternRewriter &rewriter) const override { 1172 unsigned conds = caseOp.getNumConditions(); 1173 llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue(); 1174 // Type can be CHARACTER, INTEGER, or LOGICAL (C1145) 1175 auto ty = caseOp.getSelector().getType(); 1176 if (ty.isa<fir::CharacterType>()) { 1177 TODO(caseOp.getLoc(), "fir.select_case codegen with character type"); 1178 return failure(); 1179 } 1180 mlir::Value selector = caseOp.getSelector(adaptor.getOperands()); 1181 auto loc = caseOp.getLoc(); 1182 for (unsigned t = 0; t != conds; ++t) { 1183 mlir::Block *dest = caseOp.getSuccessor(t); 1184 llvm::Optional<mlir::ValueRange> destOps = 1185 caseOp.getSuccessorOperands(adaptor.getOperands(), t); 1186 llvm::Optional<mlir::ValueRange> cmpOps = 1187 *caseOp.getCompareOperands(adaptor.getOperands(), t); 1188 mlir::Value caseArg = *(cmpOps.getValue().begin()); 1189 mlir::Attribute attr = cases[t]; 1190 if (attr.isa<fir::PointIntervalAttr>()) { 1191 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 1192 loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg); 1193 genCaseLadderStep(loc, cmp, dest, destOps, rewriter); 1194 continue; 1195 } 1196 if (attr.isa<fir::LowerBoundAttr>()) { 1197 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 1198 loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector); 1199 genCaseLadderStep(loc, cmp, dest, destOps, rewriter); 1200 continue; 1201 } 1202 if (attr.isa<fir::UpperBoundAttr>()) { 1203 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 1204 loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg); 1205 genCaseLadderStep(loc, cmp, dest, destOps, rewriter); 1206 continue; 1207 } 1208 if (attr.isa<fir::ClosedIntervalAttr>()) { 1209 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 1210 loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector); 1211 auto *thisBlock = rewriter.getInsertionBlock(); 1212 auto *newBlock1 = createBlock(rewriter, dest); 1213 auto *newBlock2 = createBlock(rewriter, dest); 1214 rewriter.setInsertionPointToEnd(thisBlock); 1215 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2); 1216 rewriter.setInsertionPointToEnd(newBlock1); 1217 mlir::Value caseArg0 = *(cmpOps.getValue().begin() + 1); 1218 auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>( 1219 loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0); 1220 genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2); 1221 rewriter.setInsertionPointToEnd(newBlock2); 1222 continue; 1223 } 1224 assert(attr.isa<mlir::UnitAttr>()); 1225 assert((t + 1 == conds) && "unit must be last"); 1226 genBrOp(caseOp, dest, destOps, rewriter); 1227 } 1228 return success(); 1229 } 1230 }; 1231 } // namespace 1232 1233 template <typename OP> 1234 static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select, 1235 typename OP::Adaptor adaptor, 1236 mlir::ConversionPatternRewriter &rewriter) { 1237 unsigned conds = select.getNumConditions(); 1238 auto cases = select.getCases().getValue(); 1239 mlir::Value selector = adaptor.selector(); 1240 auto loc = select.getLoc(); 1241 assert(conds > 0 && "select must have cases"); 1242 1243 llvm::SmallVector<mlir::Block *> destinations; 1244 llvm::SmallVector<mlir::ValueRange> destinationsOperands; 1245 mlir::Block *defaultDestination; 1246 mlir::ValueRange defaultOperands; 1247 
  llvm::SmallVector<int32_t> caseValues;

  for (unsigned t = 0; t != conds; ++t) {
    mlir::Block *dest = select.getSuccessor(t);
    auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t);
    const mlir::Attribute &attr = cases[t];
    if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) {
      destinations.push_back(dest);
      destinationsOperands.push_back(destOps.hasValue() ? *destOps
                                                        : ValueRange());
      caseValues.push_back(intAttr.getInt());
      continue;
    }
    assert(attr.template dyn_cast_or_null<mlir::UnitAttr>());
    assert((t + 1 == conds) && "unit must be last");
    defaultDestination = dest;
    defaultOperands = destOps.hasValue() ? *destOps : ValueRange();
  }

  // LLVM::SwitchOp takes an i32 type for the selector.
  if (select.getSelector().getType() != rewriter.getI32Type())
    selector =
        rewriter.create<LLVM::TruncOp>(loc, rewriter.getI32Type(), selector);

  rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>(
      select, selector,
      /*defaultDestination=*/defaultDestination,
      /*defaultOperands=*/defaultOperands,
      /*caseValues=*/caseValues,
      /*caseDestinations=*/destinations,
      /*caseOperands=*/destinationsOperands,
      /*branchWeights=*/ArrayRef<int32_t>());
}

namespace {
/// conversion of fir::SelectOp to an if-then-else ladder
struct SelectOpConversion : public FIROpConversion<fir::SelectOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter);
    return success();
  }
};

/// `fir.load` --> `llvm.load`
struct LoadOpConversion : public FIROpConversion<fir::LoadOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // fir.box is a special case because it is considered as an SSA value in
    // FIR, but it is lowered as a pointer to a descriptor. So fir.ref<fir.box>
    // and fir.box end up being the same LLVM types and loading a
    // fir.ref<fir.box> is actually a no-op in LLVM.
    if (load.getType().isa<fir::BoxType>()) {
      rewriter.replaceOp(load, adaptor.getOperands()[0]);
    } else {
      mlir::Type ty = convertType(load.getType());
      ArrayRef<NamedAttribute> at = load->getAttrs();
      rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>(
          load, ty, adaptor.getOperands(), at);
    }
    return success();
  }
};

/// Lower `fir.no_reassoc` to LLVM IR dialect.
/// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the
/// fast math flags?
struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]);
    return success();
  }
};

/// Lower `fir.select_type` to LLVM IR dialect.
1332 struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> { 1333 using FIROpConversion::FIROpConversion; 1334 1335 mlir::LogicalResult 1336 matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor, 1337 mlir::ConversionPatternRewriter &rewriter) const override { 1338 mlir::emitError(select.getLoc(), 1339 "fir.select_type should have already been converted"); 1340 return failure(); 1341 } 1342 }; 1343 1344 /// conversion of fir::SelectRankOp to an if-then-else ladder 1345 struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> { 1346 using FIROpConversion::FIROpConversion; 1347 1348 mlir::LogicalResult 1349 matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor, 1350 mlir::ConversionPatternRewriter &rewriter) const override { 1351 selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter); 1352 return success(); 1353 } 1354 }; 1355 1356 /// `fir.store` --> `llvm.store` 1357 struct StoreOpConversion : public FIROpConversion<fir::StoreOp> { 1358 using FIROpConversion::FIROpConversion; 1359 1360 mlir::LogicalResult 1361 matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor, 1362 mlir::ConversionPatternRewriter &rewriter) const override { 1363 if (store.value().getType().isa<fir::BoxType>()) { 1364 // fir.box value is actually in memory, load it first before storing it. 1365 mlir::Location loc = store.getLoc(); 1366 mlir::Type boxPtrTy = adaptor.getOperands()[0].getType(); 1367 auto val = rewriter.create<mlir::LLVM::LoadOp>( 1368 loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(), 1369 adaptor.getOperands()[0]); 1370 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 1371 store, val, adaptor.getOperands()[1]); 1372 } else { 1373 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 1374 store, adaptor.getOperands()[0], adaptor.getOperands()[1]); 1375 } 1376 return success(); 1377 } 1378 }; 1379 1380 /// convert to LLVM IR dialect `undef` 1381 struct UndefOpConversion : public FIROpConversion<fir::UndefOp> { 1382 using FIROpConversion::FIROpConversion; 1383 1384 mlir::LogicalResult 1385 matchAndRewrite(fir::UndefOp undef, OpAdaptor, 1386 mlir::ConversionPatternRewriter &rewriter) const override { 1387 rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>( 1388 undef, convertType(undef.getType())); 1389 return success(); 1390 } 1391 }; 1392 1393 /// `fir.unreachable` --> `llvm.unreachable` 1394 struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> { 1395 using FIROpConversion::FIROpConversion; 1396 1397 mlir::LogicalResult 1398 matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor, 1399 mlir::ConversionPatternRewriter &rewriter) const override { 1400 rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach); 1401 return success(); 1402 } 1403 }; 1404 1405 struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> { 1406 using FIROpConversion::FIROpConversion; 1407 1408 mlir::LogicalResult 1409 matchAndRewrite(fir::ZeroOp zero, OpAdaptor, 1410 mlir::ConversionPatternRewriter &rewriter) const override { 1411 mlir::Type ty = convertType(zero.getType()); 1412 if (ty.isa<mlir::LLVM::LLVMPointerType>()) { 1413 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty); 1414 } else if (ty.isa<mlir::IntegerType>()) { 1415 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 1416 zero, ty, mlir::IntegerAttr::get(zero.getType(), 0)); 1417 } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) { 1418 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 1419 zero, ty, mlir::FloatAttr::get(zero.getType(), 
0.0)); 1420 } else { 1421 // TODO: create ConstantAggregateZero for FIR aggregate/array types. 1422 return rewriter.notifyMatchFailure( 1423 zero, 1424 "conversion of fir.zero with aggregate type not implemented yet"); 1425 } 1426 return success(); 1427 } 1428 }; 1429 } // namespace 1430 1431 /// Common base class for embox to descriptor conversion. 1432 template <typename OP> 1433 struct EmboxCommonConversion : public FIROpConversion<OP> { 1434 using FIROpConversion<OP>::FIROpConversion; 1435 1436 // Find the LLVMFuncOp in whose entry block the alloca should be inserted. 1437 // The order to find the LLVMFuncOp is as follows: 1438 // 1. The parent operation of the current block if it is a LLVMFuncOp. 1439 // 2. The first ancestor that is a LLVMFuncOp. 1440 mlir::LLVM::LLVMFuncOp 1441 getFuncForAllocaInsert(mlir::ConversionPatternRewriter &rewriter) const { 1442 mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp(); 1443 return mlir::isa<mlir::LLVM::LLVMFuncOp>(parentOp) 1444 ? mlir::cast<mlir::LLVM::LLVMFuncOp>(parentOp) 1445 : parentOp->getParentOfType<mlir::LLVM::LLVMFuncOp>(); 1446 } 1447 1448 // Generate an alloca of size 1 and type \p toTy. 1449 mlir::LLVM::AllocaOp 1450 genAllocaWithType(mlir::Location loc, mlir::Type toTy, unsigned alignment, 1451 mlir::ConversionPatternRewriter &rewriter) const { 1452 auto thisPt = rewriter.saveInsertionPoint(); 1453 mlir::LLVM::LLVMFuncOp func = getFuncForAllocaInsert(rewriter); 1454 rewriter.setInsertionPointToStart(&func.front()); 1455 auto size = this->genI32Constant(loc, rewriter, 1); 1456 auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, toTy, size, alignment); 1457 rewriter.restoreInsertionPoint(thisPt); 1458 return al; 1459 } 1460 1461 static int getCFIAttr(fir::BoxType boxTy) { 1462 auto eleTy = boxTy.getEleTy(); 1463 if (eleTy.isa<fir::PointerType>()) 1464 return CFI_attribute_pointer; 1465 if (eleTy.isa<fir::HeapType>()) 1466 return CFI_attribute_allocatable; 1467 return CFI_attribute_other; 1468 } 1469 1470 static fir::RecordType unwrapIfDerived(fir::BoxType boxTy) { 1471 return fir::unwrapSequenceType(fir::dyn_cast_ptrOrBoxEleTy(boxTy)) 1472 .template dyn_cast<fir::RecordType>(); 1473 } 1474 static bool isDerivedTypeWithLenParams(fir::BoxType boxTy) { 1475 auto recTy = unwrapIfDerived(boxTy); 1476 return recTy && recTy.getNumLenParams() > 0; 1477 } 1478 static bool isDerivedType(fir::BoxType boxTy) { 1479 return unwrapIfDerived(boxTy) != nullptr; 1480 } 1481 1482 // Get the element size and CFI type code of the boxed value. 
1483 std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode( 1484 mlir::Location loc, mlir::ConversionPatternRewriter &rewriter, 1485 mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const { 1486 auto doInteger = 1487 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1488 int typeCode = fir::integerBitsToTypeCode(width); 1489 return {this->genConstantOffset(loc, rewriter, width / 8), 1490 this->genConstantOffset(loc, rewriter, typeCode)}; 1491 }; 1492 auto doLogical = 1493 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1494 int typeCode = fir::logicalBitsToTypeCode(width); 1495 return {this->genConstantOffset(loc, rewriter, width / 8), 1496 this->genConstantOffset(loc, rewriter, typeCode)}; 1497 }; 1498 auto doFloat = [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1499 int typeCode = fir::realBitsToTypeCode(width); 1500 return {this->genConstantOffset(loc, rewriter, width / 8), 1501 this->genConstantOffset(loc, rewriter, typeCode)}; 1502 }; 1503 auto doComplex = 1504 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1505 auto typeCode = fir::complexBitsToTypeCode(width); 1506 return {this->genConstantOffset(loc, rewriter, width / 8 * 2), 1507 this->genConstantOffset(loc, rewriter, typeCode)}; 1508 }; 1509 auto doCharacter = 1510 [&](unsigned width, 1511 mlir::Value len) -> std::tuple<mlir::Value, mlir::Value> { 1512 auto typeCode = fir::characterBitsToTypeCode(width); 1513 auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode); 1514 if (width == 8) 1515 return {len, typeCodeVal}; 1516 auto byteWidth = this->genConstantOffset(loc, rewriter, width / 8); 1517 auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64); 1518 auto size = 1519 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len); 1520 return {size, typeCodeVal}; 1521 }; 1522 auto getKindMap = [&]() -> fir::KindMapping & { 1523 return this->lowerTy().getKindMap(); 1524 }; 1525 // Pointer-like types. 1526 if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy)) 1527 boxEleTy = eleTy; 1528 // Integer types. 1529 if (fir::isa_integer(boxEleTy)) { 1530 if (auto ty = boxEleTy.dyn_cast<mlir::IntegerType>()) 1531 return doInteger(ty.getWidth()); 1532 auto ty = boxEleTy.cast<fir::IntegerType>(); 1533 return doInteger(getKindMap().getIntegerBitsize(ty.getFKind())); 1534 } 1535 // Floating point types. 1536 if (fir::isa_real(boxEleTy)) { 1537 if (auto ty = boxEleTy.dyn_cast<mlir::FloatType>()) 1538 return doFloat(ty.getWidth()); 1539 auto ty = boxEleTy.cast<fir::RealType>(); 1540 return doFloat(getKindMap().getRealBitsize(ty.getFKind())); 1541 } 1542 // Complex types. 1543 if (fir::isa_complex(boxEleTy)) { 1544 if (auto ty = boxEleTy.dyn_cast<mlir::ComplexType>()) 1545 return doComplex( 1546 ty.getElementType().cast<mlir::FloatType>().getWidth()); 1547 auto ty = boxEleTy.cast<fir::ComplexType>(); 1548 return doComplex(getKindMap().getRealBitsize(ty.getFKind())); 1549 } 1550 // Character types. 1551 if (auto ty = boxEleTy.dyn_cast<fir::CharacterType>()) { 1552 auto charWidth = getKindMap().getCharacterBitsize(ty.getFKind()); 1553 if (ty.getLen() != fir::CharacterType::unknownLen()) { 1554 auto len = this->genConstantOffset(loc, rewriter, ty.getLen()); 1555 return doCharacter(charWidth, len); 1556 } 1557 assert(!lenParams.empty()); 1558 return doCharacter(charWidth, lenParams.back()); 1559 } 1560 // Logical type. 
1561 if (auto ty = boxEleTy.dyn_cast<fir::LogicalType>()) 1562 return doLogical(getKindMap().getLogicalBitsize(ty.getFKind())); 1563 // Array types. 1564 if (auto seqTy = boxEleTy.dyn_cast<fir::SequenceType>()) 1565 return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams); 1566 // Derived-type types. 1567 if (boxEleTy.isa<fir::RecordType>()) { 1568 auto ptrTy = mlir::LLVM::LLVMPointerType::get( 1569 this->lowerTy().convertType(boxEleTy)); 1570 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 1571 auto one = 1572 genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1); 1573 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, 1574 mlir::ValueRange{one}); 1575 auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>( 1576 loc, this->lowerTy().indexType(), gep); 1577 return {eleSize, 1578 this->genConstantOffset(loc, rewriter, fir::derivedToTypeCode())}; 1579 } 1580 // Reference type. 1581 if (fir::isa_ref_type(boxEleTy)) { 1582 // FIXME: use the target pointer size rather than sizeof(void*) 1583 return {this->genConstantOffset(loc, rewriter, sizeof(void *)), 1584 this->genConstantOffset(loc, rewriter, CFI_type_cptr)}; 1585 } 1586 fir::emitFatalError(loc, "unhandled type in fir.box code generation"); 1587 } 1588 1589 /// Basic pattern to write a field in the descriptor 1590 mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter, 1591 mlir::Location loc, mlir::Value dest, 1592 ArrayRef<unsigned> fldIndexes, mlir::Value value, 1593 bool bitcast = false) const { 1594 auto boxTy = dest.getType(); 1595 auto fldTy = this->getBoxEleTy(boxTy, fldIndexes); 1596 if (bitcast) 1597 value = rewriter.create<mlir::LLVM::BitcastOp>(loc, fldTy, value); 1598 else 1599 value = this->integerCast(loc, rewriter, fldTy, value); 1600 SmallVector<mlir::Attribute, 2> attrs; 1601 for (auto i : fldIndexes) 1602 attrs.push_back(rewriter.getI32IntegerAttr(i)); 1603 auto indexesAttr = mlir::ArrayAttr::get(rewriter.getContext(), attrs); 1604 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, boxTy, dest, value, 1605 indexesAttr); 1606 } 1607 1608 inline mlir::Value 1609 insertBaseAddress(mlir::ConversionPatternRewriter &rewriter, 1610 mlir::Location loc, mlir::Value dest, 1611 mlir::Value base) const { 1612 return insertField(rewriter, loc, dest, {kAddrPosInBox}, base, 1613 /*bitCast=*/true); 1614 } 1615 1616 inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter, 1617 mlir::Location loc, mlir::Value dest, 1618 unsigned dim, mlir::Value lb) const { 1619 return insertField(rewriter, loc, dest, 1620 {kDimsPosInBox, dim, kDimLowerBoundPos}, lb); 1621 } 1622 1623 inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter, 1624 mlir::Location loc, mlir::Value dest, 1625 unsigned dim, mlir::Value extent) const { 1626 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos}, 1627 extent); 1628 } 1629 1630 inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter, 1631 mlir::Location loc, mlir::Value dest, 1632 unsigned dim, mlir::Value stride) const { 1633 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos}, 1634 stride); 1635 } 1636 1637 /// Get the address of the type descriptor global variable that was created by 1638 /// lowering for derived type \p recType. 
1639   template <typename BOX>
1640   mlir::Value
1641   getTypeDescriptor(BOX box, mlir::ConversionPatternRewriter &rewriter,
1642                     mlir::Location loc, fir::RecordType recType) const {
1643     std::string name = recType.translateNameToFrontendMangledName();
1644     auto module = box->template getParentOfType<mlir::ModuleOp>();
1645     if (auto global = module.template lookupSymbol<fir::GlobalOp>(name)) {
1646       auto ty = mlir::LLVM::LLVMPointerType::get(
1647           this->lowerTy().convertType(global.getType()));
1648       return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
1649                                                       global.getSymName());
1650     }
1651     if (auto global =
1652             module.template lookupSymbol<mlir::LLVM::GlobalOp>(name)) {
1653       // The global may have already been translated to LLVM.
1654       auto ty = mlir::LLVM::LLVMPointerType::get(global.getType());
1655       return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
1656                                                       global.getSymName());
1657     }
1658     // The global does not exist in the current translation unit, but may be
1659     // defined elsewhere (e.g., type defined in a module).
1660     // For now, create an extern_weak symbol (will become nullptr if unresolved)
1661     // to support generating code without the front-end generated symbols.
1662     // These could be made available_externally to require the symbols to be
1663     // defined elsewhere and to cause link-time failure otherwise.
1664     auto i8Ty = rewriter.getIntegerType(8);
1665     mlir::OpBuilder modBuilder(module.getBodyRegion());
1666     // TODO: The symbols should be lowered to constants during lowering; they
1667     // are read-only.
1668     modBuilder.create<mlir::LLVM::GlobalOp>(loc, i8Ty, /*isConstant=*/false,
1669                                             mlir::LLVM::Linkage::ExternWeak,
1670                                             name, mlir::Attribute{});
1671     auto ty = mlir::LLVM::LLVMPointerType::get(i8Ty);
1672     return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, name);
1673   }
1674
1675   template <typename BOX>
1676   std::tuple<fir::BoxType, mlir::Value, mlir::Value>
1677   consDescriptorPrefix(BOX box, mlir::ConversionPatternRewriter &rewriter,
1678                        unsigned rank, mlir::ValueRange lenParams) const {
1679     auto loc = box.getLoc();
1680     auto boxTy = box.getType().template dyn_cast<fir::BoxType>();
1681     auto convTy = this->lowerTy().convertBoxType(boxTy, rank);
1682     auto llvmBoxPtrTy = convTy.template cast<mlir::LLVM::LLVMPointerType>();
1683     auto llvmBoxTy = llvmBoxPtrTy.getElementType();
1684     mlir::Value descriptor =
1685         rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy);
1686
1687     llvm::SmallVector<mlir::Value> typeparams = lenParams;
1688     if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) {
1689       if (!box.substr().empty() && fir::hasDynamicSize(boxTy.getEleTy()))
1690         typeparams.push_back(box.substr()[1]);
1691     }
1692
1693     // Write each of the fields with the appropriate values.
1694     auto [eleSize, cfiTy] =
1695         getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams);
1696     descriptor =
1697         insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize);
1698     descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox},
1699                              this->genI32Constant(loc, rewriter, CFI_VERSION));
1700     descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox},
1701                              this->genI32Constant(loc, rewriter, rank));
1702     descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy);
1703     descriptor =
1704         insertField(rewriter, loc, descriptor, {kAttributePosInBox},
1705                     this->genI32Constant(loc, rewriter, getCFIAttr(boxTy)));
1706     const bool hasAddendum = isDerivedType(boxTy);
1707     descriptor =
1708         insertField(rewriter, loc, descriptor, {kF18AddendumPosInBox},
1709
                    this->genI32Constant(loc, rewriter, hasAddendum ? 1 : 0));
1710
1711     if (hasAddendum) {
1712       auto isArray =
1713           fir::dyn_cast_ptrOrBoxEleTy(boxTy).template isa<fir::SequenceType>();
1714       unsigned typeDescFieldId = isArray ? kOptTypePtrPosInBox : kDimsPosInBox;
1715       auto typeDesc =
1716           getTypeDescriptor(box, rewriter, loc, unwrapIfDerived(boxTy));
1717       descriptor =
1718           insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc,
1719                       /*bitCast=*/true);
1720     }
1721
1722     return {boxTy, descriptor, eleSize};
1723   }
1724
1725   /// Compute the base address of a substring given the base address of a scalar
1726   /// string and the zero-based string lower bound.
1727   mlir::Value shiftSubstringBase(mlir::ConversionPatternRewriter &rewriter,
1728                                  mlir::Location loc, mlir::Value base,
1729                                  mlir::Value lowerBound) const {
1730     llvm::SmallVector<mlir::Value> gepOperands;
1731     auto baseType =
1732         base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType();
1733     if (baseType.isa<mlir::LLVM::LLVMArrayType>()) {
1734       auto idxTy = this->lowerTy().indexType();
1735       mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
1736       gepOperands.push_back(zero);
1737     }
1738     gepOperands.push_back(lowerBound);
1739     return this->genGEP(loc, base.getType(), rewriter, base, gepOperands);
1740   }
1741
1742   /// If the embox is not in a globalOp body, allocate storage for the box;
1743   /// store the value inside and return the generated alloca. Return the input
1744   /// value otherwise.
1745   mlir::Value
1746   placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter,
1747                                mlir::Location loc, mlir::Value boxValue) const {
1748     auto *thisBlock = rewriter.getInsertionBlock();
1749     if (thisBlock && mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp()))
1750       return boxValue;
1751     auto boxPtrTy = mlir::LLVM::LLVMPointerType::get(boxValue.getType());
1752     auto alloca = genAllocaWithType(loc, boxPtrTy, defaultAlign, rewriter);
1753     rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, alloca);
1754     return alloca;
1755   }
1756 };
1757
1758 /// Compute the extent of a triplet slice (lb:ub:step).
1759 static mlir::Value
1760 computeTripletExtent(mlir::ConversionPatternRewriter &rewriter,
1761                      mlir::Location loc, mlir::Value lb, mlir::Value ub,
1762                      mlir::Value step, mlir::Value zero, mlir::Type type) {
1763   mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb);
1764   extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step);
1765   extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step);
1766   // If the resulting extent is negative (`ub-lb` and `step` have different
1767   // signs), zero must be returned instead.
1768   auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
1769       loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero);
1770   return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero);
1771 }
1772
1773 /// Create a generic box on a memory reference. This conversion lowers the
1774 /// abstract box to the appropriate, initialized descriptor.
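///
/// Illustrative sketch (descriptor layout abbreviated; the exact LLVM struct
/// depends on the type converter and target):
///
///   %box = fir.embox %addr : (!fir.ref<i32>) -> !fir.box<i32>
///
/// becomes an `llvm.mlir.undef` of the converted descriptor struct into which
/// the element size, CFI version, rank (0 here), CFI type code, attribute,
/// and addendum flag are written with `llvm.insertvalue`, followed by the
/// base address; the result is stored to a stack slot unless the embox is
/// part of a global initializer.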
1775 struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> { 1776 using EmboxCommonConversion::EmboxCommonConversion; 1777 1778 mlir::LogicalResult 1779 matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor, 1780 mlir::ConversionPatternRewriter &rewriter) const override { 1781 assert(!embox.getShape() && "There should be no dims on this embox op"); 1782 auto [boxTy, dest, eleSize] = 1783 consDescriptorPrefix(embox, rewriter, /*rank=*/0, 1784 /*lenParams=*/adaptor.getOperands().drop_front(1)); 1785 dest = insertBaseAddress(rewriter, embox.getLoc(), dest, 1786 adaptor.getOperands()[0]); 1787 if (isDerivedTypeWithLenParams(boxTy)) { 1788 TODO(embox.getLoc(), 1789 "fir.embox codegen of derived with length parameters"); 1790 return failure(); 1791 } 1792 auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest); 1793 rewriter.replaceOp(embox, result); 1794 return success(); 1795 } 1796 }; 1797 1798 /// Lower `fir.emboxproc` operation. Creates a procedure box. 1799 /// TODO: Part of supporting Fortran 2003 procedure pointers. 1800 struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> { 1801 using FIROpConversion::FIROpConversion; 1802 1803 mlir::LogicalResult 1804 matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor, 1805 mlir::ConversionPatternRewriter &rewriter) const override { 1806 TODO(emboxproc.getLoc(), "fir.emboxproc codegen"); 1807 return failure(); 1808 } 1809 }; 1810 1811 /// Create a generic box on a memory reference. 1812 struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> { 1813 using EmboxCommonConversion::EmboxCommonConversion; 1814 1815 mlir::LogicalResult 1816 matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor, 1817 mlir::ConversionPatternRewriter &rewriter) const override { 1818 auto [boxTy, dest, eleSize] = consDescriptorPrefix( 1819 xbox, rewriter, xbox.getOutRank(), 1820 adaptor.getOperands().drop_front(xbox.lenParamOffset())); 1821 // Generate the triples in the dims field of the descriptor 1822 mlir::ValueRange operands = adaptor.getOperands(); 1823 auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64); 1824 mlir::Value base = operands[0]; 1825 assert(!xbox.shape().empty() && "must have a shape"); 1826 unsigned shapeOffset = xbox.shapeOffset(); 1827 bool hasShift = !xbox.shift().empty(); 1828 unsigned shiftOffset = xbox.shiftOffset(); 1829 bool hasSlice = !xbox.slice().empty(); 1830 unsigned sliceOffset = xbox.sliceOffset(); 1831 mlir::Location loc = xbox.getLoc(); 1832 mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0); 1833 mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1); 1834 mlir::Value prevDim = integerCast(loc, rewriter, i64Ty, eleSize); 1835 mlir::Value prevPtrOff = one; 1836 mlir::Type eleTy = boxTy.getEleTy(); 1837 const unsigned rank = xbox.getRank(); 1838 llvm::SmallVector<mlir::Value> gepArgs; 1839 unsigned constRows = 0; 1840 mlir::Value ptrOffset = zero; 1841 if (auto memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType())) 1842 if (auto seqTy = memEleTy.dyn_cast<fir::SequenceType>()) { 1843 mlir::Type seqEleTy = seqTy.getEleTy(); 1844 // Adjust the element scaling factor if the element is a dependent type. 
1845 if (fir::hasDynamicSize(seqEleTy)) { 1846 if (fir::isa_char(seqEleTy)) { 1847 assert(xbox.lenParams().size() == 1); 1848 prevPtrOff = integerCast(loc, rewriter, i64Ty, 1849 operands[xbox.lenParamOffset()]); 1850 } else if (seqEleTy.isa<fir::RecordType>()) { 1851 TODO(loc, "generate call to calculate size of PDT"); 1852 } else { 1853 return rewriter.notifyMatchFailure(xbox, "unexpected dynamic type"); 1854 } 1855 } else { 1856 constRows = seqTy.getConstantRows(); 1857 } 1858 } 1859 1860 bool hasSubcomp = !xbox.subcomponent().empty(); 1861 mlir::Value stepExpr; 1862 if (hasSubcomp) { 1863 // We have a subcomponent. The step value needs to be the number of 1864 // bytes per element (which is a derived type). 1865 mlir::Type ty0 = base.getType(); 1866 [[maybe_unused]] auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 1867 assert(ptrTy && "expected pointer type"); 1868 mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()); 1869 assert(memEleTy && "expected fir pointer type"); 1870 auto seqTy = memEleTy.dyn_cast<fir::SequenceType>(); 1871 assert(seqTy && "expected sequence type"); 1872 mlir::Type seqEleTy = seqTy.getEleTy(); 1873 auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy)); 1874 stepExpr = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter); 1875 } 1876 1877 // Process the array subspace arguments (shape, shift, etc.), if any, 1878 // translating everything to values in the descriptor wherever the entity 1879 // has a dynamic array dimension. 1880 for (unsigned di = 0, descIdx = 0; di < rank; ++di) { 1881 mlir::Value extent = operands[shapeOffset]; 1882 mlir::Value outerExtent = extent; 1883 bool skipNext = false; 1884 if (hasSlice) { 1885 mlir::Value off = operands[sliceOffset]; 1886 mlir::Value adj = one; 1887 if (hasShift) 1888 adj = operands[shiftOffset]; 1889 auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj); 1890 if (constRows > 0) { 1891 gepArgs.push_back(ao); 1892 --constRows; 1893 } else { 1894 auto dimOff = 1895 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff); 1896 ptrOffset = 1897 rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset); 1898 } 1899 if (mlir::isa_and_nonnull<fir::UndefOp>( 1900 xbox.slice()[3 * di + 1].getDefiningOp())) { 1901 // This dimension contains a scalar expression in the array slice op. 1902 // The dimension is loop invariant, will be dropped, and will not 1903 // appear in the descriptor. 1904 skipNext = true; 1905 } 1906 } 1907 if (!skipNext) { 1908 // store lower bound (normally 0) 1909 mlir::Value lb = zero; 1910 if (eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>()) { 1911 lb = one; 1912 if (hasShift) 1913 lb = operands[shiftOffset]; 1914 } 1915 dest = insertLowerBound(rewriter, loc, dest, descIdx, lb); 1916 1917 // store extent 1918 if (hasSlice) 1919 extent = computeTripletExtent(rewriter, loc, operands[sliceOffset], 1920 operands[sliceOffset + 1], 1921 operands[sliceOffset + 2], zero, i64Ty); 1922 dest = insertExtent(rewriter, loc, dest, descIdx, extent); 1923 1924 // store step (scaled by shaped extent) 1925 1926 mlir::Value step = hasSubcomp ? 
stepExpr : prevDim;
1927         if (hasSlice)
1928           step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step,
1929                                                     operands[sliceOffset + 2]);
1930         dest = insertStride(rewriter, loc, dest, descIdx, step);
1931         ++descIdx;
1932       }
1933
1934       // compute the stride and offset for the next natural dimension
1935       prevDim =
1936           rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevDim, outerExtent);
1937       if (constRows == 0)
1938         prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff,
1939                                                         outerExtent);
1940
1941       // increment iterators
1942       ++shapeOffset;
1943       if (hasShift)
1944         ++shiftOffset;
1945       if (hasSlice)
1946         sliceOffset += 3;
1947     }
1948     if (hasSlice || hasSubcomp || !xbox.substr().empty()) {
1949       llvm::SmallVector<mlir::Value> args = {ptrOffset};
1950       args.append(gepArgs.rbegin(), gepArgs.rend());
1951       if (hasSubcomp) {
1952         // For each field in the path, add the offset to the base via the args
1953         // list. In the most general case, some offsets must be computed at
1954         // runtime since they are not known until then.
1955         if (fir::hasDynamicSize(fir::unwrapSequenceType(
1956                 fir::unwrapPassByRefType(xbox.memref().getType()))))
1957           TODO(loc, "fir.embox codegen dynamic size component in derived type");
1958         args.append(operands.begin() + xbox.subcomponentOffset(),
1959                     operands.begin() + xbox.subcomponentOffset() +
1960                         xbox.subcomponent().size());
1961       }
1962       base =
1963           rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args);
1964       if (!xbox.substr().empty())
1965         base = shiftSubstringBase(rewriter, loc, base,
1966                                   operands[xbox.substrOffset()]);
1967     }
1968     dest = insertBaseAddress(rewriter, loc, dest, base);
1969     if (isDerivedTypeWithLenParams(boxTy))
1970       TODO(loc, "fir.embox codegen of derived with length parameters");
1971
1972     mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest);
1973     rewriter.replaceOp(xbox, result);
1974     return success();
1975   }
1976 };
1977
1978 /// Create a new box given a box reference.
1979 struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> {
1980   using EmboxCommonConversion::EmboxCommonConversion;
1981
1982   mlir::LogicalResult
1983   matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor,
1984                   mlir::ConversionPatternRewriter &rewriter) const override {
1985     mlir::Location loc = rebox.getLoc();
1986     mlir::Type idxTy = lowerTy().indexType();
1987     mlir::Value loweredBox = adaptor.getOperands()[0];
1988     mlir::ValueRange operands = adaptor.getOperands();
1989
1990     // Create new descriptor and fill its non-shape related data.
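    // Note: for character entities the element size stored in the input
    // descriptor is in bytes, so dividing by the kind width below recovers the
    // character length. E.g., a CHARACTER(KIND=4) entity of length 10 is
    // assumed to report an element size of 40, giving len = 40 / 4 = 10.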
1991 llvm::SmallVector<mlir::Value, 2> lenParams; 1992 mlir::Type inputEleTy = getInputEleTy(rebox); 1993 if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) { 1994 mlir::Value len = 1995 loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter); 1996 if (charTy.getFKind() != 1) { 1997 mlir::Value width = 1998 genConstantIndex(loc, idxTy, rewriter, charTy.getFKind()); 1999 len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width); 2000 } 2001 lenParams.emplace_back(len); 2002 } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) { 2003 if (recTy.getNumLenParams() != 0) 2004 TODO(loc, "reboxing descriptor of derived type with length parameters"); 2005 } 2006 auto [boxTy, dest, eleSize] = 2007 consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams); 2008 2009 // Read input extents, strides, and base address 2010 llvm::SmallVector<mlir::Value> inputExtents; 2011 llvm::SmallVector<mlir::Value> inputStrides; 2012 const unsigned inputRank = rebox.getRank(); 2013 for (unsigned i = 0; i < inputRank; ++i) { 2014 mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i); 2015 SmallVector<mlir::Value, 3> dimInfo = 2016 getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter); 2017 inputExtents.emplace_back(dimInfo[1]); 2018 inputStrides.emplace_back(dimInfo[2]); 2019 } 2020 2021 mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType()); 2022 mlir::Value baseAddr = 2023 loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter); 2024 2025 if (!rebox.slice().empty() || !rebox.subcomponent().empty()) 2026 return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides, 2027 operands, rewriter); 2028 return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides, 2029 operands, rewriter); 2030 } 2031 2032 private: 2033 /// Write resulting shape and base address in descriptor, and replace rebox 2034 /// op. 2035 mlir::LogicalResult 2036 finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 2037 mlir::ValueRange lbounds, mlir::ValueRange extents, 2038 mlir::ValueRange strides, 2039 mlir::ConversionPatternRewriter &rewriter) const { 2040 mlir::Location loc = rebox.getLoc(); 2041 mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1); 2042 for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) { 2043 unsigned dim = iter.index(); 2044 mlir::Value lb = lbounds.empty() ? one : lbounds[dim]; 2045 dest = insertLowerBound(rewriter, loc, dest, dim, lb); 2046 dest = insertExtent(rewriter, loc, dest, dim, std::get<0>(iter.value())); 2047 dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value())); 2048 } 2049 dest = insertBaseAddress(rewriter, loc, dest, base); 2050 mlir::Value result = 2051 placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest); 2052 rewriter.replaceOp(rebox, result); 2053 return success(); 2054 } 2055 2056 // Apply slice given the base address, extents and strides of the input box. 2057 mlir::LogicalResult 2058 sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 2059 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 2060 mlir::ValueRange operands, 2061 mlir::ConversionPatternRewriter &rewriter) const { 2062 mlir::Location loc = rebox.getLoc(); 2063 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 2064 mlir::Type idxTy = lowerTy().indexType(); 2065 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2066 // Apply subcomponent and substring shift on base address. 
2067 if (!rebox.subcomponent().empty() || !rebox.substr().empty()) { 2068 // Cast to inputEleTy* so that a GEP can be used. 2069 mlir::Type inputEleTy = getInputEleTy(rebox); 2070 auto llvmElePtrTy = 2071 mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy)); 2072 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base); 2073 2074 if (!rebox.subcomponent().empty()) { 2075 llvm::SmallVector<mlir::Value> gepOperands = {zero}; 2076 for (unsigned i = 0; i < rebox.subcomponent().size(); ++i) 2077 gepOperands.push_back(operands[rebox.subcomponentOffset() + i]); 2078 base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands); 2079 } 2080 if (!rebox.substr().empty()) 2081 base = shiftSubstringBase(rewriter, loc, base, 2082 operands[rebox.substrOffset()]); 2083 } 2084 2085 if (rebox.slice().empty()) 2086 // The array section is of the form array[%component][substring], keep 2087 // the input array extents and strides. 2088 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 2089 inputExtents, inputStrides, rewriter); 2090 2091 // Strides from the fir.box are in bytes. 2092 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2093 2094 // The slice is of the form array(i:j:k)[%component]. Compute new extents 2095 // and strides. 2096 llvm::SmallVector<mlir::Value> slicedExtents; 2097 llvm::SmallVector<mlir::Value> slicedStrides; 2098 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2099 const bool sliceHasOrigins = !rebox.shift().empty(); 2100 unsigned sliceOps = rebox.sliceOffset(); 2101 unsigned shiftOps = rebox.shiftOffset(); 2102 auto strideOps = inputStrides.begin(); 2103 const unsigned inputRank = inputStrides.size(); 2104 for (unsigned i = 0; i < inputRank; 2105 ++i, ++strideOps, ++shiftOps, sliceOps += 3) { 2106 mlir::Value sliceLb = 2107 integerCast(loc, rewriter, idxTy, operands[sliceOps]); 2108 mlir::Value inputStride = *strideOps; // already idxTy 2109 // Apply origin shift: base += (lb-shift)*input_stride 2110 mlir::Value sliceOrigin = 2111 sliceHasOrigins 2112 ? integerCast(loc, rewriter, idxTy, operands[shiftOps]) 2113 : one; 2114 mlir::Value diff = 2115 rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin); 2116 mlir::Value offset = 2117 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride); 2118 base = genGEP(loc, voidPtrTy, rewriter, base, offset); 2119 // Apply upper bound and step if this is a triplet. Otherwise, the 2120 // dimension is dropped and no extents/strides are computed. 2121 mlir::Value upper = operands[sliceOps + 1]; 2122 const bool isTripletSlice = 2123 !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp()); 2124 if (isTripletSlice) { 2125 mlir::Value step = 2126 integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]); 2127 // extent = ub-lb+step/step 2128 mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper); 2129 mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb, 2130 sliceUb, step, zero, idxTy); 2131 slicedExtents.emplace_back(extent); 2132 // stride = step*input_stride 2133 mlir::Value stride = 2134 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride); 2135 slicedStrides.emplace_back(stride); 2136 } 2137 } 2138 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 2139 slicedExtents, slicedStrides, rewriter); 2140 } 2141 2142 /// Apply a new shape to the data described by a box given the base address, 2143 /// extents and strides of the box. 
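  ///
  /// Illustrative sketch (not normative): reshaping a contiguous entity whose
  /// first byte stride is `s` to the new shape (2, 5) keeps `s` as the stride
  /// of the first dimension and derives each following stride as
  /// extent * stride, i.e. strides (s, 2*s) in Fortran column-major order.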
2144 mlir::LogicalResult 2145 reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 2146 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 2147 mlir::ValueRange operands, 2148 mlir::ConversionPatternRewriter &rewriter) const { 2149 mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(), 2150 operands.begin() + rebox.shiftOffset() + 2151 rebox.shift().size()}; 2152 if (rebox.shape().empty()) { 2153 // Only setting new lower bounds. 2154 return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents, 2155 inputStrides, rewriter); 2156 } 2157 2158 mlir::Location loc = rebox.getLoc(); 2159 // Strides from the fir.box are in bytes. 2160 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 2161 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2162 2163 llvm::SmallVector<mlir::Value> newStrides; 2164 llvm::SmallVector<mlir::Value> newExtents; 2165 mlir::Type idxTy = lowerTy().indexType(); 2166 // First stride from input box is kept. The rest is assumed contiguous 2167 // (it is not possible to reshape otherwise). If the input is scalar, 2168 // which may be OK if all new extents are ones, the stride does not 2169 // matter, use one. 2170 mlir::Value stride = inputStrides.empty() 2171 ? genConstantIndex(loc, idxTy, rewriter, 1) 2172 : inputStrides[0]; 2173 for (unsigned i = 0; i < rebox.shape().size(); ++i) { 2174 mlir::Value rawExtent = operands[rebox.shapeOffset() + i]; 2175 mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent); 2176 newExtents.emplace_back(extent); 2177 newStrides.emplace_back(stride); 2178 // nextStride = extent * stride; 2179 stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride); 2180 } 2181 return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides, 2182 rewriter); 2183 } 2184 2185 /// Return scalar element type of the input box. 2186 static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) { 2187 auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType()); 2188 if (auto seqTy = ty.dyn_cast<fir::SequenceType>()) 2189 return seqTy.getEleTy(); 2190 return ty; 2191 } 2192 }; 2193 2194 // Code shared between insert_value and extract_value Ops. 2195 struct ValueOpCommon { 2196 // Translate the arguments pertaining to any multidimensional array to 2197 // row-major order for LLVM-IR. 
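  // Illustrative example (assuming !fir.array<5x4xi32> is converted to
  // !llvm.array<4 x array<5 x i32>>): the column-major FIR indices {2, 3}
  // are reversed to the row-major LLVM indices {3, 2} before being used by
  // llvm.insertvalue / llvm.extractvalue.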
2198 static void toRowMajor(SmallVectorImpl<mlir::Attribute> &attrs, 2199 mlir::Type ty) { 2200 assert(ty && "type is null"); 2201 const auto end = attrs.size(); 2202 for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) { 2203 if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 2204 const auto dim = getDimension(seq); 2205 if (dim > 1) { 2206 auto ub = std::min(i + dim, end); 2207 std::reverse(attrs.begin() + i, attrs.begin() + ub); 2208 i += dim - 1; 2209 } 2210 ty = getArrayElementType(seq); 2211 } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) { 2212 ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()]; 2213 } else { 2214 llvm_unreachable("index into invalid type"); 2215 } 2216 } 2217 } 2218 2219 static llvm::SmallVector<mlir::Attribute> 2220 collectIndices(mlir::ConversionPatternRewriter &rewriter, 2221 mlir::ArrayAttr arrAttr) { 2222 llvm::SmallVector<mlir::Attribute> attrs; 2223 for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) { 2224 if (i->isa<mlir::IntegerAttr>()) { 2225 attrs.push_back(*i); 2226 } else { 2227 auto fieldName = i->cast<mlir::StringAttr>().getValue(); 2228 ++i; 2229 auto ty = i->cast<mlir::TypeAttr>().getValue(); 2230 auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName); 2231 attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index)); 2232 } 2233 } 2234 return attrs; 2235 } 2236 2237 private: 2238 static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) { 2239 unsigned result = 1; 2240 for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>(); 2241 eleTy; 2242 eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>()) 2243 ++result; 2244 return result; 2245 } 2246 2247 static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) { 2248 auto eleTy = ty.getElementType(); 2249 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2250 eleTy = arrTy.getElementType(); 2251 return eleTy; 2252 } 2253 }; 2254 2255 namespace { 2256 /// Extract a subobject value from an ssa-value of aggregate type 2257 struct ExtractValueOpConversion 2258 : public FIROpAndTypeConversion<fir::ExtractValueOp>, 2259 public ValueOpCommon { 2260 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2261 2262 mlir::LogicalResult 2263 doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor, 2264 mlir::ConversionPatternRewriter &rewriter) const override { 2265 auto attrs = collectIndices(rewriter, extractVal.coor()); 2266 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 2267 auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs); 2268 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>( 2269 extractVal, ty, adaptor.getOperands()[0], position); 2270 return success(); 2271 } 2272 }; 2273 2274 /// InsertValue is the generalized instruction for the composition of new 2275 /// aggregate type values. 
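///
/// Illustrative sketch (assembly syntax abbreviated): a
/// `fir.insert_value %agg, %val, [<coordinates>]` is rewritten to
/// `llvm.insertvalue %val, %agg[<indices>]`, where the coordinates are first
/// flattened to integer indices by collectIndices and then reordered to
/// row-major by toRowMajor.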
2276 struct InsertValueOpConversion
2277     : public FIROpAndTypeConversion<fir::InsertValueOp>,
2278       public ValueOpCommon {
2279   using FIROpAndTypeConversion::FIROpAndTypeConversion;
2280
2281   mlir::LogicalResult
2282   doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor,
2283             mlir::ConversionPatternRewriter &rewriter) const override {
2284     auto attrs = collectIndices(rewriter, insertVal.coor());
2285     toRowMajor(attrs, adaptor.getOperands()[0].getType());
2286     auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs);
2287     rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
2288         insertVal, ty, adaptor.getOperands()[0], adaptor.getOperands()[1],
2289         position);
2290     return success();
2291   }
2292 };
2293
2294 /// InsertOnRange inserts a value into a sequence over a range of offsets.
2295 struct InsertOnRangeOpConversion
2296     : public FIROpAndTypeConversion<fir::InsertOnRangeOp> {
2297   using FIROpAndTypeConversion::FIROpAndTypeConversion;
2298
2299   // Increments an array of subscripts in a row-major fashion.
2300   void incrementSubscripts(const SmallVector<uint64_t> &dims,
2301                            SmallVector<uint64_t> &subscripts) const {
2302     for (size_t i = dims.size(); i > 0; --i) {
2303       if (++subscripts[i - 1] < dims[i - 1]) {
2304         return;
2305       }
2306       subscripts[i - 1] = 0;
2307     }
2308   }
2309
2310   mlir::LogicalResult
2311   doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor,
2312             mlir::ConversionPatternRewriter &rewriter) const override {
2313
2314     llvm::SmallVector<uint64_t> dims;
2315     auto type = adaptor.getOperands()[0].getType();
2316
2317     // Iteratively extract the array dimensions from the type.
2318     while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
2319       dims.push_back(t.getNumElements());
2320       type = t.getElementType();
2321     }
2322
2323     SmallVector<uint64_t> lBounds;
2324     SmallVector<uint64_t> uBounds;
2325
2326     // Unzip the upper and lower bounds and convert to a row-major format.
2327     mlir::DenseIntElementsAttr coor = range.coor();
2328     auto reversedCoor = llvm::reverse(coor.getValues<int64_t>());
2329     for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) {
2330       uBounds.push_back(*i++);
2331       lBounds.push_back(*i);
2332     }
2333
2334     auto &subscripts = lBounds;
2335     auto loc = range.getLoc();
2336     mlir::Value lastOp = adaptor.getOperands()[0];
2337     mlir::Value insertVal = adaptor.getOperands()[1];
2338
2339     auto i64Ty = rewriter.getI64Type();
2340     while (subscripts != uBounds) {
2341       // Convert the uint64_t subscripts to Attributes.
2342       SmallVector<mlir::Attribute> subscriptAttrs;
2343       for (const auto &subscript : subscripts)
2344         subscriptAttrs.push_back(IntegerAttr::get(i64Ty, subscript));
2345       lastOp = rewriter.create<mlir::LLVM::InsertValueOp>(
2346           loc, ty, lastOp, insertVal,
2347           ArrayAttr::get(range.getContext(), subscriptAttrs));
2348
2349       incrementSubscripts(dims, subscripts);
2350     }
2351
2352     // Convert the uint64_t subscripts to Attributes.
2353     SmallVector<mlir::Attribute> subscriptAttrs;
2354     for (const auto &subscript : subscripts)
2355       subscriptAttrs.push_back(
2356           IntegerAttr::get(rewriter.getI64Type(), subscript));
2357     mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs);
2358
2359     rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
2360         range, ty, lastOp, insertVal,
2361         ArrayAttr::get(range.getContext(), arrayRef));
2362
2363     return success();
2364   }
2365 };
2366 } // namespace
2367
2368 /// XArrayCoor is the address arithmetic on a dynamically shaped, sliced,
2369 /// shifted etc. array.
2370 /// (See the static restriction on coordinate_of.) array_coor determines the 2371 /// coordinate (location) of a specific element. 2372 struct XArrayCoorOpConversion 2373 : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> { 2374 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2375 2376 mlir::LogicalResult 2377 doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor, 2378 mlir::ConversionPatternRewriter &rewriter) const override { 2379 auto loc = coor.getLoc(); 2380 mlir::ValueRange operands = adaptor.getOperands(); 2381 unsigned rank = coor.getRank(); 2382 assert(coor.indices().size() == rank); 2383 assert(coor.shape().empty() || coor.shape().size() == rank); 2384 assert(coor.shift().empty() || coor.shift().size() == rank); 2385 assert(coor.slice().empty() || coor.slice().size() == 3 * rank); 2386 mlir::Type idxTy = lowerTy().indexType(); 2387 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2388 mlir::Value prevExt = one; 2389 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2390 mlir::Value offset = zero; 2391 const bool isShifted = !coor.shift().empty(); 2392 const bool isSliced = !coor.slice().empty(); 2393 const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>(); 2394 2395 auto indexOps = coor.indices().begin(); 2396 auto shapeOps = coor.shape().begin(); 2397 auto shiftOps = coor.shift().begin(); 2398 auto sliceOps = coor.slice().begin(); 2399 // For each dimension of the array, generate the offset calculation. 2400 for (unsigned i = 0; i < rank; 2401 ++i, ++indexOps, ++shapeOps, ++shiftOps, sliceOps += 3) { 2402 mlir::Value index = 2403 integerCast(loc, rewriter, idxTy, operands[coor.indicesOffset() + i]); 2404 mlir::Value lb = isShifted ? integerCast(loc, rewriter, idxTy, 2405 operands[coor.shiftOffset() + i]) 2406 : one; 2407 mlir::Value step = one; 2408 bool normalSlice = isSliced; 2409 // Compute zero based index in dimension i of the element, applying 2410 // potential triplets and lower bounds. 2411 if (isSliced) { 2412 mlir::Value ub = *(sliceOps + 1); 2413 normalSlice = !mlir::isa_and_nonnull<fir::UndefOp>(ub.getDefiningOp()); 2414 if (normalSlice) 2415 step = integerCast(loc, rewriter, idxTy, *(sliceOps + 2)); 2416 } 2417 auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb); 2418 mlir::Value diff = 2419 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step); 2420 if (normalSlice) { 2421 mlir::Value sliceLb = 2422 integerCast(loc, rewriter, idxTy, operands[coor.sliceOffset() + i]); 2423 auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb); 2424 diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj); 2425 } 2426 // Update the offset given the stride and the zero based index `diff` 2427 // that was just computed. 2428 if (baseIsBoxed) { 2429 // Use stride in bytes from the descriptor. 2430 mlir::Value stride = 2431 loadStrideFromBox(loc, adaptor.getOperands()[0], i, rewriter); 2432 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride); 2433 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2434 } else { 2435 // Use stride computed at last iteration. 2436 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt); 2437 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2438 // Compute next stride assuming contiguity of the base array 2439 // (in element number). 
2440         auto nextExt =
2441             integerCast(loc, rewriter, idxTy, operands[coor.shapeOffset() + i]);
2442         prevExt =
2443             rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt);
2444       }
2445     }
2446
2447     // Add computed offset to the base address.
2448     if (baseIsBoxed) {
2449       // Working with byte offsets. The base address is read from the fir.box
2450       // and needs to be cast to i8* to do the pointer arithmetic.
2451       mlir::Type baseTy =
2452           getBaseAddrTypeFromBox(adaptor.getOperands()[0].getType());
2453       mlir::Value base =
2454           loadBaseAddrFromBox(loc, baseTy, adaptor.getOperands()[0], rewriter);
2455       mlir::Type voidPtrTy = getVoidPtrType();
2456       base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
2457       llvm::SmallVector<mlir::Value> args{offset};
2458       auto addr =
2459           rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args);
2460       if (coor.subcomponent().empty()) {
2461         rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, baseTy, addr);
2462         return success();
2463       }
2464       auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr);
2465       args.clear();
2466       args.push_back(zero);
2467       if (!coor.lenParams().empty()) {
2468         // If type parameters are present, then we don't want to use a GEPOp
2469         // as below, as the LLVM struct type cannot be statically defined.
2470         TODO(loc, "derived type with type parameters");
2471       }
2472       // TODO: array offset subcomponents must be converted to LLVM's
2473       // row-major layout here.
2474       for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
2475         args.push_back(operands[i]);
2476       rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, baseTy, casted,
2477                                                      args);
2478       return success();
2479     }
2480
2481     // The array was not boxed, so it must be contiguous. The offset is
2482     // therefore an element offset, and the base type is kept in the GEP unless
2483     // the element type size is itself dynamic.
2484     mlir::Value base;
2485     if (coor.subcomponent().empty()) {
2486       // No subcomponent.
2487       if (!coor.lenParams().empty()) {
2488         // Type parameters. Adjust element size explicitly.
2489         auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType());
2490         assert(eleTy && "result must be a reference-like type");
2491         if (fir::characterWithDynamicLen(eleTy)) {
2492           assert(coor.lenParams().size() == 1);
2493           auto bitsInChar = lowerTy().getKindMap().getCharacterBitsize(
2494               eleTy.cast<fir::CharacterType>().getFKind());
2495           auto scaling = genConstantIndex(loc, idxTy, rewriter, bitsInChar / 8);
2496           auto scaledBySize =
2497               rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, scaling);
2498           auto length =
2499               integerCast(loc, rewriter, idxTy,
2500                           adaptor.getOperands()[coor.lenParamsOffset()]);
2501           offset = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, scaledBySize,
2502                                                       length);
2503         } else {
2504           TODO(loc, "compute size of derived type with type parameters");
2505         }
2506       }
2507       // Cast the base address to a pointer to T.
2508       base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty,
2509                                                     adaptor.getOperands()[0]);
2510     } else {
2511       // Operand #0 must have a pointer type. For subcomponent slicing, we
2512       // want to cast away the array type and have a plain struct type.
2513 mlir::Type ty0 = adaptor.getOperands()[0].getType(); 2514 auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 2515 assert(ptrTy && "expected pointer type"); 2516 mlir::Type eleTy = ptrTy.getElementType(); 2517 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2518 eleTy = arrTy.getElementType(); 2519 auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy); 2520 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy, 2521 adaptor.getOperands()[0]); 2522 } 2523 SmallVector<mlir::Value> args = {offset}; 2524 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2525 args.push_back(operands[i]); 2526 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args); 2527 return success(); 2528 } 2529 }; 2530 2531 // 2532 // Primitive operations on Complex types 2533 // 2534 2535 /// Generate inline code for complex addition/subtraction 2536 template <typename LLVMOP, typename OPTY> 2537 static mlir::LLVM::InsertValueOp 2538 complexSum(OPTY sumop, mlir::ValueRange opnds, 2539 mlir::ConversionPatternRewriter &rewriter, 2540 fir::LLVMTypeConverter &lowering) { 2541 mlir::Value a = opnds[0]; 2542 mlir::Value b = opnds[1]; 2543 auto loc = sumop.getLoc(); 2544 auto ctx = sumop.getContext(); 2545 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 2546 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 2547 mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType())); 2548 mlir::Type ty = lowering.convertType(sumop.getType()); 2549 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 2550 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 2551 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 2552 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 2553 auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1); 2554 auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1); 2555 auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 2556 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0); 2557 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1); 2558 } 2559 2560 namespace { 2561 struct AddcOpConversion : public FIROpConversion<fir::AddcOp> { 2562 using FIROpConversion::FIROpConversion; 2563 2564 mlir::LogicalResult 2565 matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor, 2566 mlir::ConversionPatternRewriter &rewriter) const override { 2567 // given: (x + iy) + (x' + iy') 2568 // result: (x + x') + i(y + y') 2569 auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(), 2570 rewriter, lowerTy()); 2571 rewriter.replaceOp(addc, r.getResult()); 2572 return success(); 2573 } 2574 }; 2575 2576 struct SubcOpConversion : public FIROpConversion<fir::SubcOp> { 2577 using FIROpConversion::FIROpConversion; 2578 2579 mlir::LogicalResult 2580 matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor, 2581 mlir::ConversionPatternRewriter &rewriter) const override { 2582 // given: (x + iy) - (x' + iy') 2583 // result: (x - x') + i(y - y') 2584 auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(), 2585 rewriter, lowerTy()); 2586 rewriter.replaceOp(subc, r.getResult()); 2587 return success(); 2588 } 2589 }; 2590 2591 /// Inlined complex multiply 2592 struct MulcOpConversion : public FIROpConversion<fir::MulcOp> { 2593 using FIROpConversion::FIROpConversion; 2594 2595 mlir::LogicalResult 2596 matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor, 2597 mlir::ConversionPatternRewriter 
&rewriter) const override { 2598 // TODO: Can we use a call to __muldc3 ? 2599 // given: (x + iy) * (x' + iy') 2600 // result: (xx'-yy')+i(xy'+yx') 2601 mlir::Value a = adaptor.getOperands()[0]; 2602 mlir::Value b = adaptor.getOperands()[1]; 2603 auto loc = mulc.getLoc(); 2604 auto *ctx = mulc.getContext(); 2605 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 2606 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 2607 mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType())); 2608 mlir::Type ty = convertType(mulc.getType()); 2609 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 2610 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 2611 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 2612 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 2613 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 2614 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 2615 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 2616 auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx); 2617 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 2618 auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy); 2619 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 2620 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 2621 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 2622 rewriter.replaceOp(mulc, r0.getResult()); 2623 return success(); 2624 } 2625 }; 2626 2627 /// Inlined complex division 2628 struct DivcOpConversion : public FIROpConversion<fir::DivcOp> { 2629 using FIROpConversion::FIROpConversion; 2630 2631 mlir::LogicalResult 2632 matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor, 2633 mlir::ConversionPatternRewriter &rewriter) const override { 2634 // TODO: Can we use a call to __divdc3 instead? 2635 // Just generate inline code for now. 
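    // Note: this textbook formula can overflow or underflow prematurely when
    // the operands have very large or very small components; a library call
    // such as __divdc3 (see the TODO above) or a scaled algorithm would be
    // more robust.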
2636     // given: (x + iy) / (x' + iy')
2637     // result: ((xx'+yy')/d) + i((yx'-xy')/d) where d = x'x' + y'y'
2638     mlir::Value a = adaptor.getOperands()[0];
2639     mlir::Value b = adaptor.getOperands()[1];
2640     auto loc = divc.getLoc();
2641     auto *ctx = divc.getContext();
2642     auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
2643     auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
2644     mlir::Type eleTy = convertType(getComplexEleTy(divc.getType()));
2645     mlir::Type ty = convertType(divc.getType());
2646     auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
2647     auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
2648     auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
2649     auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
2650     auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
2651     auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1);
2652     auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
2653     auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
2654     auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
2655     auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1);
2656     auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1);
2657     auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy);
2658     auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy);
2659     auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d);
2660     auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d);
2661     auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
2662     auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
2663     auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
2664     rewriter.replaceOp(divc, r0.getResult());
2665     return success();
2666   }
2667 };
2668
2669 /// Inlined complex negation
2670 struct NegcOpConversion : public FIROpConversion<fir::NegcOp> {
2671   using FIROpConversion::FIROpConversion;
2672
2673   mlir::LogicalResult
2674   matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor,
2675                   mlir::ConversionPatternRewriter &rewriter) const override {
2676     // given: -(x + iy)
2677     // result: -x - iy
2678     auto *ctxt = neg.getContext();
2679     auto eleTy = convertType(getComplexEleTy(neg.getType()));
2680     auto ty = convertType(neg.getType());
2681     auto loc = neg.getLoc();
2682     mlir::Value o0 = adaptor.getOperands()[0];
2683     auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
2684     auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
2685     auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0);
2686     auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1);
2687     auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp);
2688     auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip);
2689     auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0);
2690     rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1);
2691     return success();
2692   }
2693 };
2694
2695 /// Conversion pattern for operations that must be dead. The information in
2696 /// these operations is used by other operations; at this point they should
2697 /// not have any more uses.
2698 /// These operations are normally dead after the pre-codegen pass.
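///
/// Illustrative sketch: a `fir.shape` whose only use was a `fir.embox` has
/// been folded into the rewritten `fir.cg.x_embox` by the pre-codegen pass,
/// so by the time this pattern runs it is expected to have no uses left and
/// can simply be erased.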
2699 template <typename FromOp> 2700 struct MustBeDeadConversion : public FIROpConversion<FromOp> { 2701 explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering) 2702 : FIROpConversion<FromOp>(lowering) {} 2703 using OpAdaptor = typename FromOp::Adaptor; 2704 2705 mlir::LogicalResult 2706 matchAndRewrite(FromOp op, OpAdaptor adaptor, 2707 mlir::ConversionPatternRewriter &rewriter) const final { 2708 if (!op->getUses().empty()) 2709 return rewriter.notifyMatchFailure(op, "op must be dead"); 2710 rewriter.eraseOp(op); 2711 return success(); 2712 } 2713 }; 2714 2715 struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> { 2716 using MustBeDeadConversion::MustBeDeadConversion; 2717 }; 2718 2719 struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> { 2720 using MustBeDeadConversion::MustBeDeadConversion; 2721 }; 2722 2723 struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> { 2724 using MustBeDeadConversion::MustBeDeadConversion; 2725 }; 2726 2727 struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> { 2728 using MustBeDeadConversion::MustBeDeadConversion; 2729 }; 2730 2731 /// `fir.is_present` --> 2732 /// ``` 2733 /// %0 = llvm.mlir.constant(0 : i64) 2734 /// %1 = llvm.ptrtoint %0 2735 /// %2 = llvm.icmp "ne" %1, %0 : i64 2736 /// ``` 2737 struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> { 2738 using FIROpConversion::FIROpConversion; 2739 2740 mlir::LogicalResult 2741 matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor, 2742 mlir::ConversionPatternRewriter &rewriter) const override { 2743 mlir::Type idxTy = lowerTy().indexType(); 2744 mlir::Location loc = isPresent.getLoc(); 2745 auto ptr = adaptor.getOperands()[0]; 2746 2747 if (isPresent.val().getType().isa<fir::BoxCharType>()) { 2748 auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>(); 2749 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 2750 2751 mlir::Type ty = structTy.getBody()[0]; 2752 mlir::MLIRContext *ctx = isPresent.getContext(); 2753 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 2754 ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0); 2755 } 2756 mlir::LLVM::ConstantOp c0 = 2757 genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0); 2758 auto addr = rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr); 2759 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 2760 isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0); 2761 2762 return success(); 2763 } 2764 }; 2765 2766 /// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of 2767 /// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element 2768 /// in this struct is a pointer. Its type is determined from `KIND`. The 2nd 2769 /// element is the length of the character buffer (`#n`). 
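///
/// Illustrative sketch (types and syntax abbreviated):
///
///   %b = fir.emboxchar %buffer, %len : (!fir.ref<!fir.char<1,?>>, i64) -> !fir.boxchar<1>
///
/// is expected to become an `llvm.mlir.undef` of `!llvm.struct<(ptr<i8>, i64)>`
/// followed by two `llvm.insertvalue` operations writing the buffer pointer
/// and the (integer-cast) length.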
/// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of
/// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element
/// in this struct is a pointer. Its type is determined from `KIND`. The 2nd
/// element is the length of the character buffer (`#n`).
struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    MLIRContext *ctx = emboxChar.getContext();

    mlir::Value charBuffer = operands[0];
    mlir::Value charBufferLen = operands[1];

    mlir::Location loc = emboxChar.getLoc();
    mlir::Type llvmStructTy = convertType(emboxChar.getType());
    auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy);

    mlir::Type lenTy =
        llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1];
    mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen);

    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>(
        loc, llvmStructTy, llvmStruct, charBuffer, c0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
        emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1);

    return success();
  }
};
} // namespace

/// Construct an `llvm.extractvalue` instruction. It will return the value at
/// element \p x from \p tuple.
static mlir::LLVM::ExtractValueOp
genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty,
                         mlir::ConversionPatternRewriter &rewriter,
                         mlir::MLIRContext *ctx, int x) {
  auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x));
  auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x];
  return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx);
}

namespace {
/// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the
/// boxchar.
struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value boxChar = adaptor.getOperands()[0];
    mlir::Location loc = boxChar.getLoc();
    mlir::MLIRContext *ctx = boxChar.getContext();
    mlir::Type returnValTy = boxCharLen.getResult().getType();

    constexpr int boxcharLenIdx = 1;
    mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex(
        loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx);
    mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len);
    rewriter.replaceOp(boxCharLen, lenAfterCast);

    return success();
  }
};
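// Example (illustrative only; names are placeholders): a length query such as
//   %len = fir.boxchar_len %bc : (!fir.boxchar<1>) -> i64
// reduces under BoxCharLenOpConversion to a single field extraction, plus an
// integer cast when the requested result width differs:
//   %len = llvm.extractvalue %bc[1] : !llvm.struct<(ptr<i8>, i64)>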
/// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for
/// the character buffer and one for the buffer length.
struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    MLIRContext *ctx = unboxchar.getContext();

    mlir::Type lenTy = convertType(unboxchar.getType(1));
    mlir::Value tuple = adaptor.getOperands()[0];
    mlir::Type tupleTy = tuple.getType();

    mlir::Location loc = unboxchar.getLoc();
    mlir::Value ptrToBuffer =
        genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 0);

    mlir::LLVM::ExtractValueOp len =
        genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 1);
    mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len);

    rewriter.replaceOp(unboxchar,
                       ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast});
    return success();
  }
};

/// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its
/// components.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(unboxproc.getLoc(), "fir.unboxproc codegen");
    return failure();
  }
};

/// Convert `fir.field_index`. The conversion depends on whether the size of
/// the record is static or dynamic.
struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> {
  using FIROpConversion::FIROpConversion;

  // NB: most field references should be resolved by this point
  mlir::LogicalResult
  matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto recTy = field.on_type().cast<fir::RecordType>();
    unsigned index = recTy.getFieldIndex(field.field_id());

    if (!fir::hasDynamicSize(recTy)) {
      // Derived type has compile-time constant layout. Return index of the
      // component type in the parent type (to be used in GEP).
      rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset(
                                    field.getLoc(), rewriter, index)});
      return success();
    }

    // Derived type has a dynamic layout. Call the compiler generated function
    // to determine the byte offset of the field at runtime. This returns a
    // non-constant.
    FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get(
        field.getContext(), getOffsetMethodName(recTy, field.field_id()));
    NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr);
    NamedAttribute fieldAttr = rewriter.getNamedAttr(
        "field", mlir::IntegerAttr::get(lowerTy().indexType(), index));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        field, lowerTy().offsetType(), adaptor.getOperands(),
        llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr});
    return success();
  }

  // Reconstruct the name of the compiler generated method that calculates the
  // offset
  inline static std::string getOffsetMethodName(fir::RecordType recTy,
                                                llvm::StringRef field) {
    return recTy.getName().str() + "P." + field.str() + ".offset";
  }
};
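// Example (illustrative only; the record name "t" and field name "f" are
// placeholders): getOffsetMethodName produces the symbol "tP.f.offset", so a
// field index into a dynamically sized derived type lowers to roughly
//   %off = llvm.call @tP.f.offset(...) : (...) -> i<offset-width>
// whereas the statically sized case folds to a plain constant that feeds a
// GEP directly.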
/// Convert a (memory) reference into a reference to a subobject.
/// The coordinate_of op is a Swiss army knife operation that can be used on
/// (memory) references to records, arrays, complex, etc. as well as boxes.
/// With unboxed arrays, there is the restriction that the array have a static
/// shape in all but the last column.
struct CoordinateOpConversion
    : public FIROpAndTypeConversion<fir::CoordinateOp> {
  using FIROpAndTypeConversion::FIROpAndTypeConversion;

  mlir::LogicalResult
  doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();

    mlir::Location loc = coor.getLoc();
    mlir::Value base = operands[0];
    mlir::Type baseObjectTy = coor.getBaseType();
    mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy);
    assert(objectTy && "fir.coordinate_of expects a reference type");

    // Complex type - basically, extract the real or imaginary part
    if (fir::isa_complex(objectTy)) {
      mlir::LLVM::ConstantOp c0 =
          genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
      SmallVector<mlir::Value> offs = {c0, operands[1]};
      mlir::Value gep = genGEP(loc, ty, rewriter, base, offs);
      rewriter.replaceOp(coor, gep);
      return success();
    }

    // Boxed type - get the base pointer from the box
    if (baseObjectTy.dyn_cast<fir::BoxType>())
      return doRewriteBox(coor, ty, operands, loc, rewriter);

    // Reference or pointer type
    if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType>())
      return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter);

    return rewriter.notifyMatchFailure(
        coor, "fir.coordinate_of base operand has unsupported type");
  }

  unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) const {
    return fir::hasDynamicSize(ty)
               ? op.getDefiningOp()
                     ->getAttrOfType<mlir::IntegerAttr>("field")
                     .getInt()
               : getIntValue(op);
  }

  int64_t getIntValue(mlir::Value val) const {
    assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value");
    mlir::Operation *defop = val.getDefiningOp();

    if (auto constOp = dyn_cast<mlir::arith::ConstantIntOp>(defop))
      return constOp.value();
    if (auto llConstOp = dyn_cast<mlir::LLVM::ConstantOp>(defop))
      if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>())
        return attr.getValue().getSExtValue();
    fir::emitFatalError(val.getLoc(), "must be a constant");
  }

  bool hasSubDimensions(mlir::Type type) const {
    return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>();
  }

  /// Check whether this form of `!fir.coordinate_of` is supported. These
  /// additional checks are required, because we are not yet able to convert
  /// all valid forms of `!fir.coordinate_of`.
  /// TODO: Either implement the unsupported cases or extend the verifier
  /// in FIROps.cpp instead.
  bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) const {
    const std::size_t numOfCoors = coors.size();
    std::size_t i = 0;
    bool subEle = false;
    bool ptrEle = false;
    for (; i < numOfCoors; ++i) {
      mlir::Value nxtOpnd = coors[i];
      if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
        subEle = true;
        i += arrTy.getDimension() - 1;
        type = arrTy.getEleTy();
      } else if (auto recTy = type.dyn_cast<fir::RecordType>()) {
        subEle = true;
        type = recTy.getType(getFieldNumber(recTy, nxtOpnd));
      } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) {
        subEle = true;
        type = tupTy.getType(getIntValue(nxtOpnd));
      } else {
        ptrEle = true;
      }
    }
    if (ptrEle)
      return (!subEle) && (numOfCoors == 1);
    return subEle && (i >= numOfCoors);
  }

  /// Walk the abstract memory layout and determine if the path traverses any
  /// array types with unknown shape. Return true iff all the array types have
  /// a constant shape along the path.
  bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) const {
    const std::size_t sz = coors.size();
    std::size_t i = 0;
    for (; i < sz; ++i) {
      mlir::Value nxtOpnd = coors[i];
      if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
        if (fir::sequenceWithNonConstantShape(arrTy))
          return false;
        i += arrTy.getDimension() - 1;
        type = arrTy.getEleTy();
      } else if (auto strTy = type.dyn_cast<fir::RecordType>()) {
        type = strTy.getType(getFieldNumber(strTy, nxtOpnd));
      } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) {
        type = strTy.getType(getIntValue(nxtOpnd));
      } else {
        return true;
      }
    }
    return true;
  }

private:
  mlir::LogicalResult
  doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands,
               mlir::Location loc,
               mlir::ConversionPatternRewriter &rewriter) const {
    mlir::Type boxObjTy = coor.getBaseType();
    assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`");

    mlir::Value boxBaseAddr = operands[0];

    // 1. SPECIAL CASE (uses `fir.len_param_index`):
    //   %box = ... : !fir.box<!fir.type<derived{len1:i32}>>
    //   %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}>
    //   %addr = coordinate_of %box, %lenp
    if (coor.getNumOperands() == 2) {
      mlir::Operation *coordinateDef = (*coor.coor().begin()).getDefiningOp();
      if (isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef)) {
        TODO(loc,
             "fir.coordinate_of - fir.len_param_index is not supported yet");
      }
    }

    // 2. GENERAL CASE:
    // 2.1. (`fir.array`)
    //   %box = ... : !fir.box<!fir.array<?xU>>
    //   %idx = ... : index
    //   %resultAddr = coordinate_of %box, %idx : !fir.ref<U>
    // 2.2 (`fir.derived`)
    //   %box = ... : !fir.box<!fir.type<derived_type{field_1:i32}>>
    //   %idx = ... : i32
    //   %resultAddr = coordinate_of %box, %idx : !fir.ref<i32>
    // 2.3 (`fir.derived` inside `fir.array`)
    //   %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32,
    //     field_2:f32}>>>
    //   %idx1 = ... : index
    //   %idx2 = ... : i32
    //   %resultAddr = coordinate_of %box, %idx1, %idx2 : !fir.ref<f32>
    // 2.4. TODO: Either document or disable any other case that the following
    //   implementation might convert.
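    // Example (illustrative only) for case 2.1 with a rank-1 array: the loop
    // below scales the index by the byte stride read from the box's dims
    // triple and applies the result to the box's base address:
    //   %stride = <load dims[0].stride from the box>
    //   %off    = llvm.mul %idx, %stride : i64
    //   %base   = llvm.bitcast %boxBaseAddr : ... to !llvm.ptr<i8>
    //   %addr   = llvm.getelementptr %base[%off] : ... -> !llvm.ptr<i8>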
    mlir::LLVM::ConstantOp c0 =
        genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
    mlir::Value resultAddr =
        loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()),
                            boxBaseAddr, rewriter);
    auto currentObjTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy);
    mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext());

    for (unsigned i = 1, last = operands.size(); i < last; ++i) {
      if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) {
        if (i != 1)
          TODO(loc, "fir.array nested inside other array and/or derived type");
        // Applies byte strides from the box. Ignore lower bound from box
        // since fir.coordinate_of indexes are zero based. Lowering takes care
        // of lower bound aspects. This both accounts for dynamically sized
        // types and non contiguous arrays.
        auto idxTy = lowerTy().indexType();
        mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0);
        for (unsigned index = i, lastIndex = i + arrTy.getDimension();
             index < lastIndex; ++index) {
          mlir::Value stride =
              loadStrideFromBox(loc, operands[0], index - i, rewriter);
          auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy,
                                                       operands[index], stride);
          off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off);
        }
        auto voidPtrBase =
            rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr);
        SmallVector<mlir::Value> args{off};
        resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy,
                                                        voidPtrBase, args);
        i += arrTy.getDimension() - 1;
        currentObjTy = arrTy.getEleTy();
      } else if (auto recTy = currentObjTy.dyn_cast<fir::RecordType>()) {
        auto recRefTy =
            mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy));
        mlir::Value nxtOpnd = operands[i];
        auto memObj =
            rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr);
        llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd};
        currentObjTy = recTy.getType(getFieldNumber(recTy, nxtOpnd));
        auto llvmCurrentObjTy = lowerTy().convertType(currentObjTy);
        auto gep = rewriter.create<mlir::LLVM::GEPOp>(
            loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj,
            args);
        resultAddr =
            rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep);
      } else {
        fir::emitFatalError(loc, "unexpected type in coordinate_of");
      }
    }

    rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr);
    return success();
  }

  mlir::LogicalResult
  doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty,
                    mlir::ValueRange operands, mlir::Location loc,
                    mlir::ConversionPatternRewriter &rewriter) const {
    mlir::Type baseObjectTy = coor.getBaseType();

    mlir::Type currentObjTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy);
    bool hasSubdimension = hasSubDimensions(currentObjTy);
    bool columnIsDeferred = !hasSubdimension;

    if (!supportedCoordinate(currentObjTy, operands.drop_front(1))) {
      TODO(loc, "unsupported combination of coordinate operands");
    }

    const bool hasKnownShape =
        arraysHaveKnownShape(currentObjTy, operands.drop_front(1));

    // If only the column is `?`, then we can simply place the column value in
    // the 0-th GEP position.
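    // Example (illustrative only; the shape is a placeholder): for a base of
    // type `!fir.ref<!fir.array<10x?xi32>>`, only the last (column) extent is
    // unknown, so the corresponding index can still be emitted as the leading
    // GEP index over the statically shaped inner dimensions.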
    if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) {
      if (!hasKnownShape) {
        const unsigned sz = arrTy.getDimension();
        if (arraysHaveKnownShape(arrTy.getEleTy(),
                                 operands.drop_front(1 + sz))) {
          llvm::ArrayRef<int64_t> shape = arrTy.getShape();
          bool allConst = true;
          for (unsigned i = 0; i < sz - 1; ++i) {
            if (shape[i] < 0) {
              allConst = false;
              break;
            }
          }
          if (allConst)
            columnIsDeferred = true;
        }
      }
    }

    if (fir::hasDynamicSize(fir::unwrapSequenceType(currentObjTy))) {
      mlir::emitError(
          loc, "fir.coordinate_of with a dynamic element size is unsupported");
      return failure();
    }

    if (hasKnownShape || columnIsDeferred) {
      SmallVector<mlir::Value> offs;
      if (hasKnownShape && hasSubdimension) {
        mlir::LLVM::ConstantOp c0 =
            genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
        offs.push_back(c0);
      }
      const std::size_t sz = operands.size();
      Optional<int> dims;
      SmallVector<mlir::Value> arrIdx;
      for (std::size_t i = 1; i < sz; ++i) {
        mlir::Value nxtOpnd = operands[i];

        if (!currentObjTy) {
          mlir::emitError(loc, "invalid coordinate/check failed");
          return failure();
        }

        // check if the i-th coordinate relates to an array
        if (dims.hasValue()) {
          arrIdx.push_back(nxtOpnd);
          int dimsLeft = *dims;
          if (dimsLeft > 1) {
            dims = dimsLeft - 1;
            continue;
          }
          currentObjTy = currentObjTy.cast<fir::SequenceType>().getEleTy();
          // append array range in reverse (FIR arrays are column-major)
          offs.append(arrIdx.rbegin(), arrIdx.rend());
          arrIdx.clear();
          dims.reset();
          continue;
        }
        if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) {
          int d = arrTy.getDimension() - 1;
          if (d > 0) {
            dims = d;
            arrIdx.push_back(nxtOpnd);
            continue;
          }
          currentObjTy = currentObjTy.cast<fir::SequenceType>().getEleTy();
          offs.push_back(nxtOpnd);
          continue;
        }

        // check if the i-th coordinate relates to a field
        if (auto recTy = currentObjTy.dyn_cast<fir::RecordType>())
          currentObjTy = recTy.getType(getFieldNumber(recTy, nxtOpnd));
        else if (auto tupTy = currentObjTy.dyn_cast<mlir::TupleType>())
          currentObjTy = tupTy.getType(getIntValue(nxtOpnd));
        else
          currentObjTy = nullptr;

        offs.push_back(nxtOpnd);
      }
      if (dims.hasValue())
        offs.append(arrIdx.rbegin(), arrIdx.rend());
      mlir::Value base = operands[0];
      mlir::Value retval = genGEP(loc, ty, rewriter, base, offs);
      rewriter.replaceOp(coor, retval);
      return success();
    }

    mlir::emitError(loc, "fir.coordinate_of base operand has unsupported type");
    return failure();
  }
};

} // namespace
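// Example (illustrative only; names and shapes are placeholders): for a base
// reference with a fully static shape,
//   %e = fir.coordinate_of %a, %i, %j
//          : (!fir.ref<!fir.array<10x20xf32>>, index, index) -> !fir.ref<f32>
// doRewriteRefOrPtr emits a single GEP with a leading zero index and the array
// indices appended in reverse order (FIR arrays are column-major):
//   %e = llvm.getelementptr %a[%c0, %j, %i] : ... -> !llvm.ptr<f32>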
namespace {
/// Convert FIR dialect to LLVM dialect
///
/// This pass lowers all FIR dialect operations to LLVM IR dialect. An
/// MLIR pass is used to lower residual Std dialect to LLVM IR dialect.
///
/// This pass is not complete yet. We are upstreaming it in small patches.
class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> {
public:
  mlir::ModuleOp getModule() { return getOperation(); }

  void runOnOperation() override final {
    auto mod = getModule();
    if (!forcedTargetTriple.empty()) {
      fir::setTargetTriple(mod, forcedTargetTriple);
    }

    auto *context = getModule().getContext();
    fir::LLVMTypeConverter typeConverter{getModule()};
    mlir::OwningRewritePatternList pattern(context);
    pattern.insert<
        AbsentOpConversion, AddcOpConversion, AddrOfOpConversion,
        AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion,
        BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion,
        BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion,
        BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeDescOpConversion,
        CallOpConversion, CmpcOpConversion, ConstcOpConversion,
        ConvertOpConversion, CoordinateOpConversion, DispatchOpConversion,
        DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion,
        EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion,
        ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion,
        FreeMemOpConversion, HasValueOpConversion, GenTypeDescOpConversion,
        GlobalLenOpConversion, GlobalOpConversion, InsertOnRangeOpConversion,
        InsertValueOpConversion, IsPresentOpConversion,
        LenParamIndexOpConversion, LoadOpConversion, NegcOpConversion,
        NoReassocOpConversion, MulcOpConversion, SelectCaseOpConversion,
        SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion,
        ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion,
        SliceOpConversion, StoreOpConversion, StringLitOpConversion,
        SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion,
        UndefOpConversion, UnreachableOpConversion, XArrayCoorOpConversion,
        XEmboxOpConversion, XReboxOpConversion, ZeroOpConversion>(
        typeConverter);
    mlir::populateStdToLLVMConversionPatterns(typeConverter, pattern);
    mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter,
                                                            pattern);
    mlir::ConversionTarget target{*context};
    target.addLegalDialect<mlir::LLVM::LLVMDialect>();

    // required NOPs for applying a full conversion
    target.addLegalOp<mlir::ModuleOp>();

    // apply the patterns
    if (mlir::failed(mlir::applyFullConversion(getModule(), target,
                                               std::move(pattern)))) {
      signalPassFailure();
    }
  }
};

/// Lower from LLVM IR dialect to proper LLVM-IR and dump the module
struct LLVMIRLoweringPass
    : public mlir::PassWrapper<LLVMIRLoweringPass,
                               mlir::OperationPass<mlir::ModuleOp>> {
  using Printer = fir::LLVMIRLoweringPrinter;
  LLVMIRLoweringPass(raw_ostream &output, Printer p)
      : output{output}, printer{p} {}

  mlir::ModuleOp getModule() { return getOperation(); }

  void runOnOperation() override final {
    auto *ctx = getModule().getContext();
    auto optName = getModule().getName();
    llvm::LLVMContext llvmCtx;
    if (auto llvmModule = mlir::translateModuleToLLVMIR(
            getModule(), llvmCtx, optName ? *optName : "FIRModule")) {
      printer(*llvmModule, output);
      return;
    }

    mlir::emitError(mlir::UnknownLoc::get(ctx), "could not emit LLVM-IR\n");
    signalPassFailure();
  }

private:
  raw_ostream &output;
  Printer printer;
};

} // namespace

std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() {
  return std::make_unique<FIRToLLVMLowering>();
}

std::unique_ptr<mlir::Pass>
fir::createLLVMDialectToLLVMPass(raw_ostream &output,
                                 fir::LLVMIRLoweringPrinter printer) {
  return std::make_unique<LLVMIRLoweringPass>(output, printer);
}
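// Example (illustrative only; `module` is assumed to be a loaded
// mlir::ModuleOp whose context has the required dialects registered): the
// FIR-to-LLVM pass is typically run from a pass manager, e.g.
//   mlir::PassManager pm(module.getContext());
//   pm.addPass(fir::createFIRToLLVMPass());
//   if (mlir::failed(pm.run(module)))
//     ...; // handle lowering failure
// createLLVMDialectToLLVMPass is constructed separately, with an output
// stream and a printer callback, when the translated LLVM-IR should be
// emitted.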