1 //===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/ 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "flang/Optimizer/CodeGen/CodeGen.h" 14 #include "CGOps.h" 15 #include "PassDetail.h" 16 #include "flang/ISO_Fortran_binding.h" 17 #include "flang/Optimizer/Dialect/FIRAttr.h" 18 #include "flang/Optimizer/Dialect/FIROps.h" 19 #include "flang/Optimizer/Support/TypeCode.h" 20 #include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h" 21 #include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" 22 #include "mlir/Conversion/LLVMCommon/Pattern.h" 23 #include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h" 24 #include "mlir/IR/BuiltinTypes.h" 25 #include "mlir/IR/Matchers.h" 26 #include "mlir/Pass/Pass.h" 27 #include "mlir/Target/LLVMIR/ModuleTranslation.h" 28 #include "llvm/ADT/ArrayRef.h" 29 30 #define DEBUG_TYPE "flang-codegen" 31 32 // fir::LLVMTypeConverter for converting to LLVM IR dialect types. 33 #include "TypeConverter.h" 34 35 // TODO: This should really be recovered from the specified target. 36 static constexpr unsigned defaultAlign = 8; 37 38 /// `fir.box` attribute values as defined for CFI_attribute_t in 39 /// flang/ISO_Fortran_binding.h. 40 static constexpr unsigned kAttrPointer = CFI_attribute_pointer; 41 static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable; 42 43 static inline mlir::Type getVoidPtrType(mlir::MLIRContext *context) { 44 return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8)); 45 } 46 47 static mlir::LLVM::ConstantOp 48 genConstantIndex(mlir::Location loc, mlir::Type ity, 49 mlir::ConversionPatternRewriter &rewriter, 50 std::int64_t offset) { 51 auto cattr = rewriter.getI64IntegerAttr(offset); 52 return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr); 53 } 54 55 static Block *createBlock(mlir::ConversionPatternRewriter &rewriter, 56 mlir::Block *insertBefore) { 57 assert(insertBefore && "expected valid insertion block"); 58 return rewriter.createBlock(insertBefore->getParent(), 59 mlir::Region::iterator(insertBefore)); 60 } 61 62 namespace { 63 /// FIR conversion pattern template 64 template <typename FromOp> 65 class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> { 66 public: 67 explicit FIROpConversion(fir::LLVMTypeConverter &lowering) 68 : mlir::ConvertOpToLLVMPattern<FromOp>(lowering) {} 69 70 protected: 71 mlir::Type convertType(mlir::Type ty) const { 72 return lowerTy().convertType(ty); 73 } 74 mlir::Type voidPtrTy() const { return getVoidPtrType(); } 75 76 mlir::Type getVoidPtrType() const { 77 return mlir::LLVM::LLVMPointerType::get( 78 mlir::IntegerType::get(&lowerTy().getContext(), 8)); 79 } 80 81 mlir::LLVM::ConstantOp 82 genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter, 83 int value) const { 84 mlir::Type i32Ty = rewriter.getI32Type(); 85 mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value); 86 return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr); 87 } 88 89 mlir::LLVM::ConstantOp 90 genConstantOffset(mlir::Location loc, 91 mlir::ConversionPatternRewriter &rewriter, 92 
int offset) const { 93 mlir::Type ity = lowerTy().offsetType(); 94 mlir::IntegerAttr cattr = rewriter.getI32IntegerAttr(offset); 95 return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr); 96 } 97 98 /// Construct code sequence to extract the specifc value from a `fir.box`. 99 mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box, 100 mlir::Type resultTy, 101 mlir::ConversionPatternRewriter &rewriter, 102 unsigned boxValue) const { 103 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 104 mlir::LLVM::ConstantOp cValuePos = 105 genConstantOffset(loc, rewriter, boxValue); 106 auto pty = mlir::LLVM::LLVMPointerType::get(resultTy); 107 auto p = rewriter.create<mlir::LLVM::GEPOp>( 108 loc, pty, box, mlir::ValueRange{c0, cValuePos}); 109 return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p); 110 } 111 112 /// Method to construct code sequence to get the triple for dimension `dim` 113 /// from a box. 114 SmallVector<mlir::Value, 3> 115 getDimsFromBox(mlir::Location loc, ArrayRef<mlir::Type> retTys, 116 mlir::Value box, mlir::Value dim, 117 mlir::ConversionPatternRewriter &rewriter) const { 118 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 119 mlir::LLVM::ConstantOp cDims = 120 genConstantOffset(loc, rewriter, kDimsPosInBox); 121 mlir::LLVM::LoadOp l0 = 122 loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter); 123 mlir::LLVM::LoadOp l1 = 124 loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter); 125 mlir::LLVM::LoadOp l2 = 126 loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter); 127 return {l0.getResult(), l1.getResult(), l2.getResult()}; 128 } 129 130 mlir::LLVM::LoadOp 131 loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0, 132 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off, 133 mlir::Type ty, 134 mlir::ConversionPatternRewriter &rewriter) const { 135 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 136 mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off); 137 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c); 138 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 139 } 140 141 mlir::Value 142 loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim, 143 mlir::ConversionPatternRewriter &rewriter) const { 144 auto idxTy = lowerTy().indexType(); 145 auto c0 = genConstantOffset(loc, rewriter, 0); 146 auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox); 147 auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim); 148 return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy, 149 rewriter); 150 } 151 152 /// Read base address from a fir.box. Returned address has type ty. 
153 mlir::Value 154 loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box, 155 mlir::ConversionPatternRewriter &rewriter) const { 156 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 157 mlir::LLVM::ConstantOp cAddr = 158 genConstantOffset(loc, rewriter, kAddrPosInBox); 159 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 160 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr); 161 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 162 } 163 164 mlir::Value 165 loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box, 166 mlir::ConversionPatternRewriter &rewriter) const { 167 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 168 mlir::LLVM::ConstantOp cElemLen = 169 genConstantOffset(loc, rewriter, kElemLenPosInBox); 170 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 171 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen); 172 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 173 } 174 175 // Load the attribute from the \p box and perform a check against \p maskValue 176 // The final comparison is implemented as `(attribute & maskValue) != 0`. 177 mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box, 178 mlir::ConversionPatternRewriter &rewriter, 179 unsigned maskValue) const { 180 mlir::Type attrTy = rewriter.getI32Type(); 181 mlir::Value attribute = 182 getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox); 183 mlir::LLVM::ConstantOp attrMask = 184 genConstantOffset(loc, rewriter, maskValue); 185 auto maskRes = 186 rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask); 187 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 188 return rewriter.create<mlir::LLVM::ICmpOp>( 189 loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0); 190 } 191 192 // Get the element type given an LLVM type that is of the form 193 // [llvm.ptr](array|struct|vector)+ and the provided indexes. 194 static mlir::Type getBoxEleTy(mlir::Type type, 195 llvm::ArrayRef<unsigned> indexes) { 196 if (auto t = type.dyn_cast<mlir::LLVM::LLVMPointerType>()) 197 type = t.getElementType(); 198 for (auto i : indexes) { 199 if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) { 200 assert(!t.isOpaque() && i < t.getBody().size()); 201 type = t.getBody()[i]; 202 } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 203 type = t.getElementType(); 204 } else if (auto t = type.dyn_cast<mlir::VectorType>()) { 205 type = t.getElementType(); 206 } else { 207 fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()), 208 "request for invalid box element type"); 209 } 210 } 211 return type; 212 } 213 214 // Return LLVM type of the base address given the LLVM type 215 // of the related descriptor (lowered fir.box type). 216 static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) { 217 return getBoxEleTy(type, {kAddrPosInBox}); 218 } 219 220 template <typename... ARGS> 221 mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty, 222 mlir::ConversionPatternRewriter &rewriter, 223 mlir::Value base, ARGS... args) const { 224 SmallVector<mlir::Value> cv{args...}; 225 return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv); 226 } 227 228 /// Perform an extension or truncation as needed on an integer value. Lowering 229 /// to the specific target may involve some sign-extending or truncation of 230 /// values, particularly to fit them from abstract box types to the 231 /// appropriate reified structures. 
232 mlir::Value integerCast(mlir::Location loc, 233 mlir::ConversionPatternRewriter &rewriter, 234 mlir::Type ty, mlir::Value val) const { 235 auto valTy = val.getType(); 236 // If the value was not yet lowered, lower its type so that it can 237 // be used in getPrimitiveTypeSizeInBits. 238 if (!valTy.isa<mlir::IntegerType>()) 239 valTy = convertType(valTy); 240 auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty); 241 auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy); 242 if (toSize < fromSize) 243 return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val); 244 if (toSize > fromSize) 245 return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val); 246 return val; 247 } 248 249 fir::LLVMTypeConverter &lowerTy() const { 250 return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter()); 251 } 252 }; 253 254 /// FIR conversion pattern template 255 template <typename FromOp> 256 class FIROpAndTypeConversion : public FIROpConversion<FromOp> { 257 public: 258 using FIROpConversion<FromOp>::FIROpConversion; 259 using OpAdaptor = typename FromOp::Adaptor; 260 261 mlir::LogicalResult 262 matchAndRewrite(FromOp op, OpAdaptor adaptor, 263 mlir::ConversionPatternRewriter &rewriter) const final { 264 mlir::Type ty = this->convertType(op.getType()); 265 return doRewrite(op, ty, adaptor, rewriter); 266 } 267 268 virtual mlir::LogicalResult 269 doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor, 270 mlir::ConversionPatternRewriter &rewriter) const = 0; 271 }; 272 273 /// Create value signaling an absent optional argument in a call, e.g. 274 /// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>` 275 struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> { 276 using FIROpConversion::FIROpConversion; 277 278 mlir::LogicalResult 279 matchAndRewrite(fir::AbsentOp absent, OpAdaptor, 280 mlir::ConversionPatternRewriter &rewriter) const override { 281 mlir::Type ty = convertType(absent.getType()); 282 mlir::Location loc = absent.getLoc(); 283 284 if (absent.getType().isa<fir::BoxCharType>()) { 285 auto structTy = ty.cast<mlir::LLVM::LLVMStructType>(); 286 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 287 auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 288 auto nullField = 289 rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]); 290 mlir::MLIRContext *ctx = absent.getContext(); 291 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 292 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 293 absent, ty, undefStruct, nullField, c0); 294 } else { 295 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty); 296 } 297 return success(); 298 } 299 }; 300 301 // Lower `fir.address_of` operation to `llvm.address_of` operation. 302 struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> { 303 using FIROpConversion::FIROpConversion; 304 305 mlir::LogicalResult 306 matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor, 307 mlir::ConversionPatternRewriter &rewriter) const override { 308 auto ty = convertType(addr.getType()); 309 rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>( 310 addr, ty, addr.symbol().getRootReference().getValue()); 311 return success(); 312 } 313 }; 314 } // namespace 315 316 /// Lookup the function to compute the memory size of this parametric derived 317 /// type. The size of the object may depend on the LEN type parameters of the 318 /// derived type. 
319 static mlir::LLVM::LLVMFuncOp 320 getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op, 321 mlir::ConversionPatternRewriter &rewriter) { 322 auto module = op->getParentOfType<mlir::ModuleOp>(); 323 std::string name = recTy.getName().str() + "P.mem.size"; 324 return module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name); 325 } 326 327 namespace { 328 /// convert to LLVM IR dialect `alloca` 329 struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> { 330 using FIROpConversion::FIROpConversion; 331 332 mlir::LogicalResult 333 matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor, 334 mlir::ConversionPatternRewriter &rewriter) const override { 335 mlir::ValueRange operands = adaptor.getOperands(); 336 auto loc = alloc.getLoc(); 337 mlir::Type ity = lowerTy().indexType(); 338 unsigned i = 0; 339 mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult(); 340 mlir::Type ty = convertType(alloc.getType()); 341 mlir::Type resultTy = ty; 342 if (alloc.hasLenParams()) { 343 unsigned end = alloc.numLenParams(); 344 llvm::SmallVector<mlir::Value> lenParams; 345 for (; i < end; ++i) 346 lenParams.push_back(operands[i]); 347 mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType()); 348 if (auto chrTy = scalarType.dyn_cast<fir::CharacterType>()) { 349 fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen( 350 chrTy.getContext(), chrTy.getFKind()); 351 ty = mlir::LLVM::LLVMPointerType::get(convertType(rawCharTy)); 352 assert(end == 1); 353 size = integerCast(loc, rewriter, ity, lenParams[0]); 354 } else if (auto recTy = scalarType.dyn_cast<fir::RecordType>()) { 355 mlir::LLVM::LLVMFuncOp memSizeFn = 356 getDependentTypeMemSizeFn(recTy, alloc, rewriter); 357 if (!memSizeFn) 358 emitError(loc, "did not find allocation function"); 359 mlir::NamedAttribute attr = rewriter.getNamedAttr( 360 "callee", mlir::SymbolRefAttr::get(memSizeFn)); 361 auto call = rewriter.create<mlir::LLVM::CallOp>( 362 loc, ity, lenParams, llvm::ArrayRef<mlir::NamedAttribute>{attr}); 363 size = call.getResult(0); 364 ty = mlir::LLVM::LLVMPointerType::get( 365 mlir::IntegerType::get(alloc.getContext(), 8)); 366 } else { 367 return emitError(loc, "unexpected type ") 368 << scalarType << " with type parameters"; 369 } 370 } 371 if (alloc.hasShapeOperands()) { 372 mlir::Type allocEleTy = fir::unwrapRefType(alloc.getType()); 373 // Scale the size by constant factors encoded in the array type. 374 // We only do this for arrays that don't have a constant interior, since 375 // those are the only ones that get decayed to a pointer to the element 376 // type. 377 if (auto seqTy = allocEleTy.dyn_cast<fir::SequenceType>()) { 378 if (!seqTy.hasConstantInterior()) { 379 fir::SequenceType::Extent constSize = 1; 380 for (auto extent : seqTy.getShape()) 381 if (extent != fir::SequenceType::getUnknownExtent()) 382 constSize *= extent; 383 mlir::Value constVal{ 384 genConstantIndex(loc, ity, rewriter, constSize).getResult()}; 385 size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, constVal); 386 } 387 } 388 unsigned end = operands.size(); 389 for (; i < end; ++i) 390 size = rewriter.create<mlir::LLVM::MulOp>( 391 loc, ity, size, integerCast(loc, rewriter, ity, operands[i])); 392 } 393 if (ty == resultTy) { 394 // Do not emit the bitcast if ty and resultTy are the same. 
395 rewriter.replaceOpWithNewOp<mlir::LLVM::AllocaOp>(alloc, ty, size, 396 alloc->getAttrs()); 397 } else { 398 auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, ty, size, 399 alloc->getAttrs()); 400 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(alloc, resultTy, al); 401 } 402 return success(); 403 } 404 }; 405 406 /// Lower `fir.box_addr` to the sequence of operations to extract the first 407 /// element of the box. 408 struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> { 409 using FIROpConversion::FIROpConversion; 410 411 mlir::LogicalResult 412 matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor, 413 mlir::ConversionPatternRewriter &rewriter) const override { 414 mlir::Value a = adaptor.getOperands()[0]; 415 auto loc = boxaddr.getLoc(); 416 mlir::Type ty = convertType(boxaddr.getType()); 417 if (auto argty = boxaddr.val().getType().dyn_cast<fir::BoxType>()) { 418 rewriter.replaceOp(boxaddr, loadBaseAddrFromBox(loc, ty, a, rewriter)); 419 } else { 420 auto c0attr = rewriter.getI32IntegerAttr(0); 421 auto c0 = mlir::ArrayAttr::get(boxaddr.getContext(), c0attr); 422 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, ty, a, 423 c0); 424 } 425 return success(); 426 } 427 }; 428 429 /// Lower `fir.box_dims` to a sequence of operations to extract the requested 430 /// dimension infomartion from the boxed value. 431 /// Result in a triple set of GEPs and loads. 432 struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> { 433 using FIROpConversion::FIROpConversion; 434 435 mlir::LogicalResult 436 matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor, 437 mlir::ConversionPatternRewriter &rewriter) const override { 438 SmallVector<mlir::Type, 3> resultTypes = { 439 convertType(boxdims.getResult(0).getType()), 440 convertType(boxdims.getResult(1).getType()), 441 convertType(boxdims.getResult(2).getType()), 442 }; 443 auto results = 444 getDimsFromBox(boxdims.getLoc(), resultTypes, adaptor.getOperands()[0], 445 adaptor.getOperands()[1], rewriter); 446 rewriter.replaceOp(boxdims, results); 447 return success(); 448 } 449 }; 450 451 /// Lower `fir.box_elesize` to a sequence of operations ro extract the size of 452 /// an element in the boxed value. 453 struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> { 454 using FIROpConversion::FIROpConversion; 455 456 mlir::LogicalResult 457 matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor, 458 mlir::ConversionPatternRewriter &rewriter) const override { 459 mlir::Value a = adaptor.getOperands()[0]; 460 auto loc = boxelesz.getLoc(); 461 auto ty = convertType(boxelesz.getType()); 462 auto elemSize = getValueFromBox(loc, a, ty, rewriter, kElemLenPosInBox); 463 rewriter.replaceOp(boxelesz, elemSize); 464 return success(); 465 } 466 }; 467 468 /// Lower `fir.box_isalloc` to a sequence of operations to determine if the 469 /// boxed value was from an ALLOCATABLE entity. 
470 struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> { 471 using FIROpConversion::FIROpConversion; 472 473 mlir::LogicalResult 474 matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor, 475 mlir::ConversionPatternRewriter &rewriter) const override { 476 mlir::Value box = adaptor.getOperands()[0]; 477 auto loc = boxisalloc.getLoc(); 478 mlir::Value check = 479 genBoxAttributeCheck(loc, box, rewriter, kAttrAllocatable); 480 rewriter.replaceOp(boxisalloc, check); 481 return success(); 482 } 483 }; 484 485 /// Lower `fir.box_isarray` to a sequence of operations to determine if the 486 /// boxed is an array. 487 struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> { 488 using FIROpConversion::FIROpConversion; 489 490 mlir::LogicalResult 491 matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor, 492 mlir::ConversionPatternRewriter &rewriter) const override { 493 mlir::Value a = adaptor.getOperands()[0]; 494 auto loc = boxisarray.getLoc(); 495 auto rank = 496 getValueFromBox(loc, a, rewriter.getI32Type(), rewriter, kRankPosInBox); 497 auto c0 = genConstantOffset(loc, rewriter, 0); 498 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 499 boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0); 500 return success(); 501 } 502 }; 503 504 /// Lower `fir.box_isptr` to a sequence of operations to determined if the 505 /// boxed value was from a POINTER entity. 506 struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> { 507 using FIROpConversion::FIROpConversion; 508 509 mlir::LogicalResult 510 matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor, 511 mlir::ConversionPatternRewriter &rewriter) const override { 512 mlir::Value box = adaptor.getOperands()[0]; 513 auto loc = boxisptr.getLoc(); 514 mlir::Value check = genBoxAttributeCheck(loc, box, rewriter, kAttrPointer); 515 rewriter.replaceOp(boxisptr, check); 516 return success(); 517 } 518 }; 519 520 /// Lower `fir.box_rank` to the sequence of operation to extract the rank from 521 /// the box. 522 struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> { 523 using FIROpConversion::FIROpConversion; 524 525 mlir::LogicalResult 526 matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor, 527 mlir::ConversionPatternRewriter &rewriter) const override { 528 mlir::Value a = adaptor.getOperands()[0]; 529 auto loc = boxrank.getLoc(); 530 mlir::Type ty = convertType(boxrank.getType()); 531 auto result = getValueFromBox(loc, a, ty, rewriter, kRankPosInBox); 532 rewriter.replaceOp(boxrank, result); 533 return success(); 534 } 535 }; 536 537 /// Lower `fir.string_lit` to LLVM IR dialect operation. 
538 struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> { 539 using FIROpConversion::FIROpConversion; 540 541 mlir::LogicalResult 542 matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor, 543 mlir::ConversionPatternRewriter &rewriter) const override { 544 auto ty = convertType(constop.getType()); 545 auto attr = constop.getValue(); 546 if (attr.isa<mlir::StringAttr>()) { 547 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr); 548 return success(); 549 } 550 551 auto arr = attr.cast<mlir::ArrayAttr>(); 552 auto charTy = constop.getType().cast<fir::CharacterType>(); 553 unsigned bits = lowerTy().characterBitsize(charTy); 554 mlir::Type intTy = rewriter.getIntegerType(bits); 555 auto attrs = llvm::map_range( 556 arr.getValue(), [intTy, bits](mlir::Attribute attr) -> Attribute { 557 return mlir::IntegerAttr::get( 558 intTy, 559 attr.cast<mlir::IntegerAttr>().getValue().sextOrTrunc(bits)); 560 }); 561 mlir::Type vecType = mlir::VectorType::get(arr.size(), intTy); 562 auto denseAttr = mlir::DenseElementsAttr::get( 563 vecType.cast<mlir::ShapedType>(), llvm::to_vector<8>(attrs)); 564 rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>(constop, ty, 565 denseAttr); 566 return success(); 567 } 568 }; 569 570 /// Lower `fir.boxproc_host` operation. Extracts the host pointer from the 571 /// boxproc. 572 /// TODO: Part of supporting Fortran 2003 procedure pointers. 573 struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> { 574 using FIROpConversion::FIROpConversion; 575 576 mlir::LogicalResult 577 matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor, 578 mlir::ConversionPatternRewriter &rewriter) const override { 579 TODO(boxprochost.getLoc(), "fir.boxproc_host codegen"); 580 return failure(); 581 } 582 }; 583 584 /// Lower `fir.box_tdesc` to the sequence of operations to extract the type 585 /// descriptor from the box. 
586 struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> { 587 using FIROpConversion::FIROpConversion; 588 589 mlir::LogicalResult 590 matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor, 591 mlir::ConversionPatternRewriter &rewriter) const override { 592 mlir::Value box = adaptor.getOperands()[0]; 593 auto loc = boxtypedesc.getLoc(); 594 mlir::Type typeTy = 595 fir::getDescFieldTypeModel<kTypePosInBox>()(boxtypedesc.getContext()); 596 auto result = getValueFromBox(loc, box, typeTy, rewriter, kTypePosInBox); 597 auto typePtrTy = mlir::LLVM::LLVMPointerType::get(typeTy); 598 rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(boxtypedesc, typePtrTy, 599 result); 600 return success(); 601 } 602 }; 603 604 // `fir.call` -> `llvm.call` 605 struct CallOpConversion : public FIROpConversion<fir::CallOp> { 606 using FIROpConversion::FIROpConversion; 607 608 mlir::LogicalResult 609 matchAndRewrite(fir::CallOp call, OpAdaptor adaptor, 610 mlir::ConversionPatternRewriter &rewriter) const override { 611 SmallVector<mlir::Type> resultTys; 612 for (auto r : call.getResults()) 613 resultTys.push_back(convertType(r.getType())); 614 rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>( 615 call, resultTys, adaptor.getOperands(), call->getAttrs()); 616 return success(); 617 } 618 }; 619 } // namespace 620 621 static mlir::Type getComplexEleTy(mlir::Type complex) { 622 if (auto cc = complex.dyn_cast<mlir::ComplexType>()) 623 return cc.getElementType(); 624 return complex.cast<fir::ComplexType>().getElementType(); 625 } 626 627 namespace { 628 /// Compare complex values 629 /// 630 /// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une). 631 /// 632 /// For completeness, all other comparison are done on the real component only. 633 struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> { 634 using FIROpConversion::FIROpConversion; 635 636 mlir::LogicalResult 637 matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor, 638 mlir::ConversionPatternRewriter &rewriter) const override { 639 mlir::ValueRange operands = adaptor.getOperands(); 640 mlir::MLIRContext *ctxt = cmp.getContext(); 641 mlir::Type eleTy = convertType(getComplexEleTy(cmp.lhs().getType())); 642 mlir::Type resTy = convertType(cmp.getType()); 643 mlir::Location loc = cmp.getLoc(); 644 auto pos0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0)); 645 SmallVector<mlir::Value, 2> rp{rewriter.create<mlir::LLVM::ExtractValueOp>( 646 loc, eleTy, operands[0], pos0), 647 rewriter.create<mlir::LLVM::ExtractValueOp>( 648 loc, eleTy, operands[1], pos0)}; 649 auto rcp = 650 rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, rp, cmp->getAttrs()); 651 auto pos1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1)); 652 SmallVector<mlir::Value, 2> ip{rewriter.create<mlir::LLVM::ExtractValueOp>( 653 loc, eleTy, operands[0], pos1), 654 rewriter.create<mlir::LLVM::ExtractValueOp>( 655 loc, eleTy, operands[1], pos1)}; 656 auto icp = 657 rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, ip, cmp->getAttrs()); 658 SmallVector<mlir::Value, 2> cp{rcp, icp}; 659 switch (cmp.getPredicate()) { 660 case mlir::arith::CmpFPredicate::OEQ: // .EQ. 661 rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp); 662 break; 663 case mlir::arith::CmpFPredicate::UNE: // .NE. 
664 rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp); 665 break; 666 default: 667 rewriter.replaceOp(cmp, rcp.getResult()); 668 break; 669 } 670 return success(); 671 } 672 }; 673 674 /// Lower complex constants 675 struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> { 676 using FIROpConversion::FIROpConversion; 677 678 mlir::LogicalResult 679 matchAndRewrite(fir::ConstcOp conc, OpAdaptor, 680 mlir::ConversionPatternRewriter &rewriter) const override { 681 mlir::Location loc = conc.getLoc(); 682 mlir::MLIRContext *ctx = conc.getContext(); 683 mlir::Type ty = convertType(conc.getType()); 684 mlir::Type ety = convertType(getComplexEleTy(conc.getType())); 685 auto realFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getReal())); 686 auto realPart = 687 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, realFloatAttr); 688 auto imFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getImaginary())); 689 auto imPart = 690 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, imFloatAttr); 691 auto realIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 692 auto imIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 693 auto undef = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 694 auto setReal = rewriter.create<mlir::LLVM::InsertValueOp>( 695 loc, ty, undef, realPart, realIndex); 696 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(conc, ty, setReal, 697 imPart, imIndex); 698 return success(); 699 } 700 701 inline APFloat getValue(mlir::Attribute attr) const { 702 return attr.cast<fir::RealAttr>().getValue(); 703 } 704 }; 705 706 /// convert value of from-type to value of to-type 707 struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> { 708 using FIROpConversion::FIROpConversion; 709 710 static bool isFloatingPointTy(mlir::Type ty) { 711 return ty.isa<mlir::FloatType>(); 712 } 713 714 mlir::LogicalResult 715 matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor, 716 mlir::ConversionPatternRewriter &rewriter) const override { 717 auto fromTy = convertType(convert.value().getType()); 718 auto toTy = convertType(convert.res().getType()); 719 mlir::Value op0 = adaptor.getOperands()[0]; 720 if (fromTy == toTy) { 721 rewriter.replaceOp(convert, op0); 722 return success(); 723 } 724 auto loc = convert.getLoc(); 725 auto convertFpToFp = [&](mlir::Value val, unsigned fromBits, 726 unsigned toBits, mlir::Type toTy) -> mlir::Value { 727 if (fromBits == toBits) { 728 // TODO: Converting between two floating-point representations with the 729 // same bitwidth is not allowed for now. 730 mlir::emitError(loc, 731 "cannot implicitly convert between two floating-point " 732 "representations of the same bitwidth"); 733 return {}; 734 } 735 if (fromBits > toBits) 736 return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val); 737 return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val); 738 }; 739 // Complex to complex conversion. 740 if (fir::isa_complex(convert.value().getType()) && 741 fir::isa_complex(convert.res().getType())) { 742 // Special case: handle the conversion of a complex such that both the 743 // real and imaginary parts are converted together. 
744 auto zero = mlir::ArrayAttr::get(convert.getContext(), 745 rewriter.getI32IntegerAttr(0)); 746 auto one = mlir::ArrayAttr::get(convert.getContext(), 747 rewriter.getI32IntegerAttr(1)); 748 auto ty = convertType(getComplexEleTy(convert.value().getType())); 749 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, zero); 750 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, one); 751 auto nt = convertType(getComplexEleTy(convert.res().getType())); 752 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty); 753 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt); 754 auto rc = convertFpToFp(rp, fromBits, toBits, nt); 755 auto ic = convertFpToFp(ip, fromBits, toBits, nt); 756 auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy); 757 auto i1 = 758 rewriter.create<mlir::LLVM::InsertValueOp>(loc, toTy, un, rc, zero); 759 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, toTy, i1, 760 ic, one); 761 return mlir::success(); 762 } 763 // Floating point to floating point conversion. 764 if (isFloatingPointTy(fromTy)) { 765 if (isFloatingPointTy(toTy)) { 766 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy); 767 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy); 768 auto v = convertFpToFp(op0, fromBits, toBits, toTy); 769 rewriter.replaceOp(convert, v); 770 return mlir::success(); 771 } 772 if (toTy.isa<mlir::IntegerType>()) { 773 rewriter.replaceOpWithNewOp<mlir::LLVM::FPToSIOp>(convert, toTy, op0); 774 return mlir::success(); 775 } 776 } else if (fromTy.isa<mlir::IntegerType>()) { 777 // Integer to integer conversion. 778 if (toTy.isa<mlir::IntegerType>()) { 779 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy); 780 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy); 781 assert(fromBits != toBits); 782 if (fromBits > toBits) { 783 rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0); 784 return mlir::success(); 785 } 786 rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0); 787 return mlir::success(); 788 } 789 // Integer to floating point conversion. 790 if (isFloatingPointTy(toTy)) { 791 rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0); 792 return mlir::success(); 793 } 794 // Integer to pointer conversion. 795 if (toTy.isa<mlir::LLVM::LLVMPointerType>()) { 796 rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0); 797 return mlir::success(); 798 } 799 } else if (fromTy.isa<mlir::LLVM::LLVMPointerType>()) { 800 // Pointer to integer conversion. 801 if (toTy.isa<mlir::IntegerType>()) { 802 rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0); 803 return mlir::success(); 804 } 805 // Pointer to pointer conversion. 806 if (toTy.isa<mlir::LLVM::LLVMPointerType>()) { 807 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0); 808 return mlir::success(); 809 } 810 } 811 return emitError(loc) << "cannot convert " << fromTy << " to " << toTy; 812 } 813 }; 814 815 /// Lower `fir.dispatch` operation. A virtual call to a method in a dispatch 816 /// table. 817 struct DispatchOpConversion : public FIROpConversion<fir::DispatchOp> { 818 using FIROpConversion::FIROpConversion; 819 820 mlir::LogicalResult 821 matchAndRewrite(fir::DispatchOp dispatch, OpAdaptor adaptor, 822 mlir::ConversionPatternRewriter &rewriter) const override { 823 TODO(dispatch.getLoc(), "fir.dispatch codegen"); 824 return failure(); 825 } 826 }; 827 828 /// Lower `fir.dispatch_table` operation. 
The dispatch table for a Fortran 829 /// derived type. 830 struct DispatchTableOpConversion 831 : public FIROpConversion<fir::DispatchTableOp> { 832 using FIROpConversion::FIROpConversion; 833 834 mlir::LogicalResult 835 matchAndRewrite(fir::DispatchTableOp dispTab, OpAdaptor adaptor, 836 mlir::ConversionPatternRewriter &rewriter) const override { 837 TODO(dispTab.getLoc(), "fir.dispatch_table codegen"); 838 return failure(); 839 } 840 }; 841 842 /// Lower `fir.dt_entry` operation. An entry in a dispatch table; binds a 843 /// method-name to a function. 844 struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> { 845 using FIROpConversion::FIROpConversion; 846 847 mlir::LogicalResult 848 matchAndRewrite(fir::DTEntryOp dtEnt, OpAdaptor adaptor, 849 mlir::ConversionPatternRewriter &rewriter) const override { 850 TODO(dtEnt.getLoc(), "fir.dt_entry codegen"); 851 return failure(); 852 } 853 }; 854 855 /// Lower `fir.global_len` operation. 856 struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> { 857 using FIROpConversion::FIROpConversion; 858 859 mlir::LogicalResult 860 matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor, 861 mlir::ConversionPatternRewriter &rewriter) const override { 862 TODO(globalLen.getLoc(), "fir.global_len codegen"); 863 return failure(); 864 } 865 }; 866 867 /// Lower fir.len_param_index 868 struct LenParamIndexOpConversion 869 : public FIROpConversion<fir::LenParamIndexOp> { 870 using FIROpConversion::FIROpConversion; 871 872 // FIXME: this should be specialized by the runtime target 873 mlir::LogicalResult 874 matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor, 875 mlir::ConversionPatternRewriter &rewriter) const override { 876 TODO(lenp.getLoc(), "fir.len_param_index codegen"); 877 } 878 }; 879 880 /// Lower `fir.gentypedesc` to a global constant. 881 struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> { 882 using FIROpConversion::FIROpConversion; 883 884 mlir::LogicalResult 885 matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor, 886 mlir::ConversionPatternRewriter &rewriter) const override { 887 TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen"); 888 return failure(); 889 } 890 }; 891 } // namespace 892 893 /// Return the LLVMFuncOp corresponding to the standard malloc call. 894 static mlir::LLVM::LLVMFuncOp 895 getMalloc(fir::AllocMemOp op, mlir::ConversionPatternRewriter &rewriter) { 896 auto module = op->getParentOfType<mlir::ModuleOp>(); 897 if (mlir::LLVM::LLVMFuncOp mallocFunc = 898 module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("malloc")) 899 return mallocFunc; 900 mlir::OpBuilder moduleBuilder( 901 op->getParentOfType<mlir::ModuleOp>().getBodyRegion()); 902 auto indexType = mlir::IntegerType::get(op.getContext(), 64); 903 return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>( 904 rewriter.getUnknownLoc(), "malloc", 905 mlir::LLVM::LLVMFunctionType::get(getVoidPtrType(op.getContext()), 906 indexType, 907 /*isVarArg=*/false)); 908 } 909 910 /// Helper function for generating the LLVM IR that computes the size 911 /// in bytes for a derived type. 
912 static mlir::Value 913 computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy, 914 mlir::ConversionPatternRewriter &rewriter) { 915 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 916 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 917 llvm::SmallVector<mlir::Value> args{one}; 918 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args); 919 return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep); 920 } 921 922 namespace { 923 /// Lower a `fir.allocmem` instruction into `llvm.call @malloc` 924 struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> { 925 using FIROpConversion::FIROpConversion; 926 927 mlir::LogicalResult 928 matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor, 929 mlir::ConversionPatternRewriter &rewriter) const override { 930 auto heapTy = heap.getType(); 931 auto ty = convertType(heapTy); 932 mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter); 933 mlir::Location loc = heap.getLoc(); 934 auto ity = lowerTy().indexType(); 935 auto dataTy = fir::unwrapRefType(heapTy); 936 if (fir::isRecordWithTypeParameters(fir::unwrapSequenceType(dataTy))) 937 TODO(loc, "fir.allocmem codegen of derived type with length parameters"); 938 mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty); 939 // !fir.array<NxMx!fir.char<K,?>> sets `size` to the width of !fir.char<K>. 940 // So multiply the constant dimensions here. 941 if (fir::hasDynamicSize(dataTy)) 942 if (auto seqTy = dataTy.dyn_cast<fir::SequenceType>()) 943 if (fir::characterWithDynamicLen(seqTy.getEleTy())) { 944 fir::SequenceType::Extent arrSize = 1; 945 for (auto d : seqTy.getShape()) 946 if (d != fir::SequenceType::getUnknownExtent()) 947 arrSize *= d; 948 size = rewriter.create<mlir::LLVM::MulOp>( 949 loc, ity, size, genConstantIndex(loc, ity, rewriter, arrSize)); 950 } 951 for (mlir::Value opnd : adaptor.getOperands()) 952 size = rewriter.create<mlir::LLVM::MulOp>( 953 loc, ity, size, integerCast(loc, rewriter, ity, opnd)); 954 heap->setAttr("callee", mlir::SymbolRefAttr::get(mallocFunc)); 955 auto malloc = rewriter.create<mlir::LLVM::CallOp>( 956 loc, ::getVoidPtrType(heap.getContext()), size, heap->getAttrs()); 957 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(heap, ty, 958 malloc.getResult(0)); 959 return success(); 960 } 961 962 // Compute the (allocation) size of the allocmem type in bytes. 963 mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy, 964 mlir::ConversionPatternRewriter &rewriter, 965 mlir::Type llTy) const { 966 // Use the primitive size, if available. 967 auto ptrTy = llTy.dyn_cast<mlir::LLVM::LLVMPointerType>(); 968 if (auto size = 969 mlir::LLVM::getPrimitiveTypeSizeInBits(ptrTy.getElementType())) 970 return genConstantIndex(loc, idxTy, rewriter, size / 8); 971 972 // Otherwise, generate the GEP trick in LLVM IR to compute the size. 973 return computeDerivedTypeSize(loc, ptrTy, idxTy, rewriter); 974 } 975 }; 976 } // namespace 977 978 /// Return the LLVMFuncOp corresponding to the standard free call. 
979 static mlir::LLVM::LLVMFuncOp 980 getFree(fir::FreeMemOp op, mlir::ConversionPatternRewriter &rewriter) { 981 auto module = op->getParentOfType<mlir::ModuleOp>(); 982 if (mlir::LLVM::LLVMFuncOp freeFunc = 983 module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("free")) 984 return freeFunc; 985 mlir::OpBuilder moduleBuilder(module.getBodyRegion()); 986 auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext()); 987 return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>( 988 rewriter.getUnknownLoc(), "free", 989 mlir::LLVM::LLVMFunctionType::get(voidType, 990 getVoidPtrType(op.getContext()), 991 /*isVarArg=*/false)); 992 } 993 994 namespace { 995 /// Lower a `fir.freemem` instruction into `llvm.call @free` 996 struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> { 997 using FIROpConversion::FIROpConversion; 998 999 mlir::LogicalResult 1000 matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor, 1001 mlir::ConversionPatternRewriter &rewriter) const override { 1002 mlir::LLVM::LLVMFuncOp freeFunc = getFree(freemem, rewriter); 1003 mlir::Location loc = freemem.getLoc(); 1004 auto bitcast = rewriter.create<mlir::LLVM::BitcastOp>( 1005 freemem.getLoc(), voidPtrTy(), adaptor.getOperands()[0]); 1006 freemem->setAttr("callee", mlir::SymbolRefAttr::get(freeFunc)); 1007 rewriter.create<mlir::LLVM::CallOp>( 1008 loc, mlir::TypeRange{}, mlir::ValueRange{bitcast}, freemem->getAttrs()); 1009 rewriter.eraseOp(freemem); 1010 return success(); 1011 } 1012 }; 1013 1014 /// Convert `fir.end` 1015 struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> { 1016 using FIROpConversion::FIROpConversion; 1017 1018 mlir::LogicalResult 1019 matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor, 1020 mlir::ConversionPatternRewriter &rewriter) const override { 1021 TODO(firEnd.getLoc(), "fir.end codegen"); 1022 return failure(); 1023 } 1024 }; 1025 1026 /// Lower `fir.has_value` operation to `llvm.return` operation. 1027 struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> { 1028 using FIROpConversion::FIROpConversion; 1029 1030 mlir::LogicalResult 1031 matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor, 1032 mlir::ConversionPatternRewriter &rewriter) const override { 1033 rewriter.replaceOpWithNewOp<LLVM::ReturnOp>(op, adaptor.getOperands()); 1034 return success(); 1035 } 1036 }; 1037 1038 /// Lower `fir.global` operation to `llvm.global` operation. 1039 /// `fir.insert_on_range` operations are replaced with constant dense attribute 1040 /// if they are applied on the full range. 
1041 struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> { 1042 using FIROpConversion::FIROpConversion; 1043 1044 mlir::LogicalResult 1045 matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor, 1046 mlir::ConversionPatternRewriter &rewriter) const override { 1047 auto tyAttr = convertType(global.getType()); 1048 if (global.getType().isa<fir::BoxType>()) 1049 tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType(); 1050 auto loc = global.getLoc(); 1051 mlir::Attribute initAttr{}; 1052 if (global.initVal()) 1053 initAttr = global.initVal().getValue(); 1054 auto linkage = convertLinkage(global.linkName()); 1055 auto isConst = global.constant().hasValue(); 1056 auto g = rewriter.create<mlir::LLVM::GlobalOp>( 1057 loc, tyAttr, isConst, linkage, global.getSymName(), initAttr); 1058 auto &gr = g.getInitializerRegion(); 1059 rewriter.inlineRegionBefore(global.region(), gr, gr.end()); 1060 if (!gr.empty()) { 1061 // Replace insert_on_range with a constant dense attribute if the 1062 // initialization is on the full range. 1063 auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>(); 1064 for (auto insertOp : insertOnRangeOps) { 1065 if (isFullRange(insertOp.coor(), insertOp.getType())) { 1066 auto seqTyAttr = convertType(insertOp.getType()); 1067 auto *op = insertOp.val().getDefiningOp(); 1068 auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op); 1069 if (!constant) { 1070 auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op); 1071 if (!convertOp) 1072 continue; 1073 constant = cast<mlir::arith::ConstantOp>( 1074 convertOp.value().getDefiningOp()); 1075 } 1076 mlir::Type vecType = mlir::VectorType::get( 1077 insertOp.getType().getShape(), constant.getType()); 1078 auto denseAttr = mlir::DenseElementsAttr::get( 1079 vecType.cast<ShapedType>(), constant.getValue()); 1080 rewriter.setInsertionPointAfter(insertOp); 1081 rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>( 1082 insertOp, seqTyAttr, denseAttr); 1083 } 1084 } 1085 } 1086 rewriter.eraseOp(global); 1087 return success(); 1088 } 1089 1090 bool isFullRange(mlir::DenseIntElementsAttr indexes, 1091 fir::SequenceType seqTy) const { 1092 auto extents = seqTy.getShape(); 1093 if (indexes.size() / 2 != static_cast<int64_t>(extents.size())) 1094 return false; 1095 auto cur_index = indexes.value_begin<int64_t>(); 1096 for (unsigned i = 0; i < indexes.size(); i += 2) { 1097 if (*(cur_index++) != 0) 1098 return false; 1099 if (*(cur_index++) != extents[i / 2] - 1) 1100 return false; 1101 } 1102 return true; 1103 } 1104 1105 // TODO: String comparaison should be avoided. Replace linkName with an 1106 // enumeration. 
1107 mlir::LLVM::Linkage convertLinkage(Optional<StringRef> optLinkage) const { 1108 if (optLinkage.hasValue()) { 1109 auto name = optLinkage.getValue(); 1110 if (name == "internal") 1111 return mlir::LLVM::Linkage::Internal; 1112 if (name == "linkonce") 1113 return mlir::LLVM::Linkage::Linkonce; 1114 if (name == "common") 1115 return mlir::LLVM::Linkage::Common; 1116 if (name == "weak") 1117 return mlir::LLVM::Linkage::Weak; 1118 } 1119 return mlir::LLVM::Linkage::External; 1120 } 1121 }; 1122 } // namespace 1123 1124 static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest, 1125 Optional<mlir::ValueRange> destOps, 1126 mlir::ConversionPatternRewriter &rewriter, 1127 mlir::Block *newBlock) { 1128 if (destOps.hasValue()) 1129 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(), 1130 newBlock, mlir::ValueRange()); 1131 else 1132 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock); 1133 } 1134 1135 template <typename A, typename B> 1136 static void genBrOp(A caseOp, mlir::Block *dest, Optional<B> destOps, 1137 mlir::ConversionPatternRewriter &rewriter) { 1138 if (destOps.hasValue()) 1139 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(), 1140 dest); 1141 else 1142 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest); 1143 } 1144 1145 static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp, 1146 mlir::Block *dest, 1147 Optional<mlir::ValueRange> destOps, 1148 mlir::ConversionPatternRewriter &rewriter) { 1149 auto *thisBlock = rewriter.getInsertionBlock(); 1150 auto *newBlock = createBlock(rewriter, dest); 1151 rewriter.setInsertionPointToEnd(thisBlock); 1152 genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock); 1153 rewriter.setInsertionPointToEnd(newBlock); 1154 } 1155 1156 namespace { 1157 /// Conversion of `fir.select_case` 1158 /// 1159 /// The `fir.select_case` operation is converted to a if-then-else ladder. 1160 /// Depending on the case condition type, one or several comparison and 1161 /// conditional branching can be generated. 1162 /// 1163 /// A a point value case such as `case(4)`, a lower bound case such as 1164 /// `case(5:)` or an upper bound case such as `case(:3)` are converted to a 1165 /// simple comparison between the selector value and the constant value in the 1166 /// case. The block associated with the case condition is then executed if 1167 /// the comparison succeed otherwise it branch to the next block with the 1168 /// comparison for the the next case conditon. 1169 /// 1170 /// A closed interval case condition such as `case(7:10)` is converted with a 1171 /// first comparison and conditional branching for the lower bound. If 1172 /// successful, it branch to a second block with the comparison for the 1173 /// upper bound in the same case condition. 1174 /// 1175 /// TODO: lowering of CHARACTER type cases is not handled yet. 
1176 struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> { 1177 using FIROpConversion::FIROpConversion; 1178 1179 mlir::LogicalResult 1180 matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor, 1181 mlir::ConversionPatternRewriter &rewriter) const override { 1182 unsigned conds = caseOp.getNumConditions(); 1183 llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue(); 1184 // Type can be CHARACTER, INTEGER, or LOGICAL (C1145) 1185 auto ty = caseOp.getSelector().getType(); 1186 if (ty.isa<fir::CharacterType>()) { 1187 TODO(caseOp.getLoc(), "fir.select_case codegen with character type"); 1188 return failure(); 1189 } 1190 mlir::Value selector = caseOp.getSelector(adaptor.getOperands()); 1191 auto loc = caseOp.getLoc(); 1192 for (unsigned t = 0; t != conds; ++t) { 1193 mlir::Block *dest = caseOp.getSuccessor(t); 1194 llvm::Optional<mlir::ValueRange> destOps = 1195 caseOp.getSuccessorOperands(adaptor.getOperands(), t); 1196 llvm::Optional<mlir::ValueRange> cmpOps = 1197 *caseOp.getCompareOperands(adaptor.getOperands(), t); 1198 mlir::Value caseArg = *(cmpOps.getValue().begin()); 1199 mlir::Attribute attr = cases[t]; 1200 if (attr.isa<fir::PointIntervalAttr>()) { 1201 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 1202 loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg); 1203 genCaseLadderStep(loc, cmp, dest, destOps, rewriter); 1204 continue; 1205 } 1206 if (attr.isa<fir::LowerBoundAttr>()) { 1207 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 1208 loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector); 1209 genCaseLadderStep(loc, cmp, dest, destOps, rewriter); 1210 continue; 1211 } 1212 if (attr.isa<fir::UpperBoundAttr>()) { 1213 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 1214 loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg); 1215 genCaseLadderStep(loc, cmp, dest, destOps, rewriter); 1216 continue; 1217 } 1218 if (attr.isa<fir::ClosedIntervalAttr>()) { 1219 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 1220 loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector); 1221 auto *thisBlock = rewriter.getInsertionBlock(); 1222 auto *newBlock1 = createBlock(rewriter, dest); 1223 auto *newBlock2 = createBlock(rewriter, dest); 1224 rewriter.setInsertionPointToEnd(thisBlock); 1225 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2); 1226 rewriter.setInsertionPointToEnd(newBlock1); 1227 mlir::Value caseArg0 = *(cmpOps.getValue().begin() + 1); 1228 auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>( 1229 loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0); 1230 genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2); 1231 rewriter.setInsertionPointToEnd(newBlock2); 1232 continue; 1233 } 1234 assert(attr.isa<mlir::UnitAttr>()); 1235 assert((t + 1 == conds) && "unit must be last"); 1236 genBrOp(caseOp, dest, destOps, rewriter); 1237 } 1238 return success(); 1239 } 1240 }; 1241 } // namespace 1242 1243 template <typename OP> 1244 static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select, 1245 typename OP::Adaptor adaptor, 1246 mlir::ConversionPatternRewriter &rewriter) { 1247 unsigned conds = select.getNumConditions(); 1248 auto cases = select.getCases().getValue(); 1249 mlir::Value selector = adaptor.selector(); 1250 auto loc = select.getLoc(); 1251 assert(conds > 0 && "select must have cases"); 1252 1253 llvm::SmallVector<mlir::Block *> destinations; 1254 llvm::SmallVector<mlir::ValueRange> destinationsOperands; 1255 mlir::Block *defaultDestination; 1256 mlir::ValueRange defaultOperands; 1257 
llvm::SmallVector<int32_t> caseValues; 1258 1259 for (unsigned t = 0; t != conds; ++t) { 1260 mlir::Block *dest = select.getSuccessor(t); 1261 auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t); 1262 const mlir::Attribute &attr = cases[t]; 1263 if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) { 1264 destinations.push_back(dest); 1265 destinationsOperands.push_back(destOps.hasValue() ? *destOps 1266 : ValueRange()); 1267 caseValues.push_back(intAttr.getInt()); 1268 continue; 1269 } 1270 assert(attr.template dyn_cast_or_null<mlir::UnitAttr>()); 1271 assert((t + 1 == conds) && "unit must be last"); 1272 defaultDestination = dest; 1273 defaultOperands = destOps.hasValue() ? *destOps : ValueRange(); 1274 } 1275 1276 // LLVM::SwitchOp takes a i32 type for the selector. 1277 if (select.getSelector().getType() != rewriter.getI32Type()) 1278 selector = 1279 rewriter.create<LLVM::TruncOp>(loc, rewriter.getI32Type(), selector); 1280 1281 rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>( 1282 select, selector, 1283 /*defaultDestination=*/defaultDestination, 1284 /*defaultOperands=*/defaultOperands, 1285 /*caseValues=*/caseValues, 1286 /*caseDestinations=*/destinations, 1287 /*caseOperands=*/destinationsOperands, 1288 /*branchWeights=*/ArrayRef<int32_t>()); 1289 } 1290 1291 namespace { 1292 /// conversion of fir::SelectOp to an if-then-else ladder 1293 struct SelectOpConversion : public FIROpConversion<fir::SelectOp> { 1294 using FIROpConversion::FIROpConversion; 1295 1296 mlir::LogicalResult 1297 matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor, 1298 mlir::ConversionPatternRewriter &rewriter) const override { 1299 selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter); 1300 return success(); 1301 } 1302 }; 1303 1304 /// `fir.load` --> `llvm.load` 1305 struct LoadOpConversion : public FIROpConversion<fir::LoadOp> { 1306 using FIROpConversion::FIROpConversion; 1307 1308 mlir::LogicalResult 1309 matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor, 1310 mlir::ConversionPatternRewriter &rewriter) const override { 1311 // fir.box is a special case because it is considered as an ssa values in 1312 // fir, but it is lowered as a pointer to a descriptor. So fir.ref<fir.box> 1313 // and fir.box end up being the same llvm types and loading a 1314 // fir.ref<fir.box> is actually a no op in LLVM. 1315 if (load.getType().isa<fir::BoxType>()) { 1316 rewriter.replaceOp(load, adaptor.getOperands()[0]); 1317 } else { 1318 mlir::Type ty = convertType(load.getType()); 1319 ArrayRef<NamedAttribute> at = load->getAttrs(); 1320 rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>( 1321 load, ty, adaptor.getOperands(), at); 1322 } 1323 return success(); 1324 } 1325 }; 1326 1327 /// Lower `fir.no_reassoc` to LLVM IR dialect. 1328 /// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast 1329 /// math flags? 1330 struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> { 1331 using FIROpConversion::FIROpConversion; 1332 1333 mlir::LogicalResult 1334 matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor, 1335 mlir::ConversionPatternRewriter &rewriter) const override { 1336 rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]); 1337 return success(); 1338 } 1339 }; 1340 1341 /// Lower `fir.select_type` to LLVM IR dialect. 
1342 struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> { 1343 using FIROpConversion::FIROpConversion; 1344 1345 mlir::LogicalResult 1346 matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor, 1347 mlir::ConversionPatternRewriter &rewriter) const override { 1348 mlir::emitError(select.getLoc(), 1349 "fir.select_type should have already been converted"); 1350 return failure(); 1351 } 1352 }; 1353 1354 /// conversion of fir::SelectRankOp to an if-then-else ladder 1355 struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> { 1356 using FIROpConversion::FIROpConversion; 1357 1358 mlir::LogicalResult 1359 matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor, 1360 mlir::ConversionPatternRewriter &rewriter) const override { 1361 selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter); 1362 return success(); 1363 } 1364 }; 1365 1366 /// `fir.store` --> `llvm.store` 1367 struct StoreOpConversion : public FIROpConversion<fir::StoreOp> { 1368 using FIROpConversion::FIROpConversion; 1369 1370 mlir::LogicalResult 1371 matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor, 1372 mlir::ConversionPatternRewriter &rewriter) const override { 1373 if (store.value().getType().isa<fir::BoxType>()) { 1374 // fir.box value is actually in memory, load it first before storing it. 1375 mlir::Location loc = store.getLoc(); 1376 mlir::Type boxPtrTy = adaptor.getOperands()[0].getType(); 1377 auto val = rewriter.create<mlir::LLVM::LoadOp>( 1378 loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(), 1379 adaptor.getOperands()[0]); 1380 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 1381 store, val, adaptor.getOperands()[1]); 1382 } else { 1383 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 1384 store, adaptor.getOperands()[0], adaptor.getOperands()[1]); 1385 } 1386 return success(); 1387 } 1388 }; 1389 1390 /// convert to LLVM IR dialect `undef` 1391 struct UndefOpConversion : public FIROpConversion<fir::UndefOp> { 1392 using FIROpConversion::FIROpConversion; 1393 1394 mlir::LogicalResult 1395 matchAndRewrite(fir::UndefOp undef, OpAdaptor, 1396 mlir::ConversionPatternRewriter &rewriter) const override { 1397 rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>( 1398 undef, convertType(undef.getType())); 1399 return success(); 1400 } 1401 }; 1402 1403 /// `fir.unreachable` --> `llvm.unreachable` 1404 struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> { 1405 using FIROpConversion::FIROpConversion; 1406 1407 mlir::LogicalResult 1408 matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor, 1409 mlir::ConversionPatternRewriter &rewriter) const override { 1410 rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach); 1411 return success(); 1412 } 1413 }; 1414 1415 struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> { 1416 using FIROpConversion::FIROpConversion; 1417 1418 mlir::LogicalResult 1419 matchAndRewrite(fir::ZeroOp zero, OpAdaptor, 1420 mlir::ConversionPatternRewriter &rewriter) const override { 1421 mlir::Type ty = convertType(zero.getType()); 1422 if (ty.isa<mlir::LLVM::LLVMPointerType>()) { 1423 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty); 1424 } else if (ty.isa<mlir::IntegerType>()) { 1425 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 1426 zero, ty, mlir::IntegerAttr::get(zero.getType(), 0)); 1427 } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) { 1428 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 1429 zero, ty, mlir::FloatAttr::get(zero.getType(), 
0.0)); 1430 } else { 1431 // TODO: create ConstantAggregateZero for FIR aggregate/array types. 1432 return rewriter.notifyMatchFailure( 1433 zero, 1434 "conversion of fir.zero with aggregate type not implemented yet"); 1435 } 1436 return success(); 1437 } 1438 }; 1439 } // namespace 1440 1441 /// Common base class for embox to descriptor conversion. 1442 template <typename OP> 1443 struct EmboxCommonConversion : public FIROpConversion<OP> { 1444 using FIROpConversion<OP>::FIROpConversion; 1445 1446 // Find the LLVMFuncOp in whose entry block the alloca should be inserted. 1447 // The order to find the LLVMFuncOp is as follows: 1448 // 1. The parent operation of the current block if it is a LLVMFuncOp. 1449 // 2. The first ancestor that is a LLVMFuncOp. 1450 mlir::LLVM::LLVMFuncOp 1451 getFuncForAllocaInsert(mlir::ConversionPatternRewriter &rewriter) const { 1452 mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp(); 1453 return mlir::isa<mlir::LLVM::LLVMFuncOp>(parentOp) 1454 ? mlir::cast<mlir::LLVM::LLVMFuncOp>(parentOp) 1455 : parentOp->getParentOfType<mlir::LLVM::LLVMFuncOp>(); 1456 } 1457 1458 // Generate an alloca of size 1 and type \p toTy. 1459 mlir::LLVM::AllocaOp 1460 genAllocaWithType(mlir::Location loc, mlir::Type toTy, unsigned alignment, 1461 mlir::ConversionPatternRewriter &rewriter) const { 1462 auto thisPt = rewriter.saveInsertionPoint(); 1463 mlir::LLVM::LLVMFuncOp func = getFuncForAllocaInsert(rewriter); 1464 rewriter.setInsertionPointToStart(&func.front()); 1465 auto size = this->genI32Constant(loc, rewriter, 1); 1466 auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, toTy, size, alignment); 1467 rewriter.restoreInsertionPoint(thisPt); 1468 return al; 1469 } 1470 1471 static int getCFIAttr(fir::BoxType boxTy) { 1472 auto eleTy = boxTy.getEleTy(); 1473 if (eleTy.isa<fir::PointerType>()) 1474 return CFI_attribute_pointer; 1475 if (eleTy.isa<fir::HeapType>()) 1476 return CFI_attribute_allocatable; 1477 return CFI_attribute_other; 1478 } 1479 1480 static fir::RecordType unwrapIfDerived(fir::BoxType boxTy) { 1481 return fir::unwrapSequenceType(fir::dyn_cast_ptrOrBoxEleTy(boxTy)) 1482 .template dyn_cast<fir::RecordType>(); 1483 } 1484 static bool isDerivedTypeWithLenParams(fir::BoxType boxTy) { 1485 auto recTy = unwrapIfDerived(boxTy); 1486 return recTy && recTy.getNumLenParams() > 0; 1487 } 1488 static bool isDerivedType(fir::BoxType boxTy) { 1489 return unwrapIfDerived(boxTy) != nullptr; 1490 } 1491 1492 // Get the element size and CFI type code of the boxed value. 
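// The size is in bytes and the type code is a CFI_type_t value from
// flang/ISO_Fortran_binding.h. For example (illustrative), a 32-bit integer
// yields a size of 4 and the code produced by fir::integerBitsToTypeCode(32),
// while a dynamic-length character multiplies its per-character byte width by
// the length parameter (the length alone for 1-byte characters).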
1493 std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode( 1494 mlir::Location loc, mlir::ConversionPatternRewriter &rewriter, 1495 mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const { 1496 auto doInteger = 1497 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1498 int typeCode = fir::integerBitsToTypeCode(width); 1499 return {this->genConstantOffset(loc, rewriter, width / 8), 1500 this->genConstantOffset(loc, rewriter, typeCode)}; 1501 }; 1502 auto doLogical = 1503 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1504 int typeCode = fir::logicalBitsToTypeCode(width); 1505 return {this->genConstantOffset(loc, rewriter, width / 8), 1506 this->genConstantOffset(loc, rewriter, typeCode)}; 1507 }; 1508 auto doFloat = [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1509 int typeCode = fir::realBitsToTypeCode(width); 1510 return {this->genConstantOffset(loc, rewriter, width / 8), 1511 this->genConstantOffset(loc, rewriter, typeCode)}; 1512 }; 1513 auto doComplex = 1514 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1515 auto typeCode = fir::complexBitsToTypeCode(width); 1516 return {this->genConstantOffset(loc, rewriter, width / 8 * 2), 1517 this->genConstantOffset(loc, rewriter, typeCode)}; 1518 }; 1519 auto doCharacter = 1520 [&](unsigned width, 1521 mlir::Value len) -> std::tuple<mlir::Value, mlir::Value> { 1522 auto typeCode = fir::characterBitsToTypeCode(width); 1523 auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode); 1524 if (width == 8) 1525 return {len, typeCodeVal}; 1526 auto byteWidth = this->genConstantOffset(loc, rewriter, width / 8); 1527 auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64); 1528 auto size = 1529 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len); 1530 return {size, typeCodeVal}; 1531 }; 1532 auto getKindMap = [&]() -> fir::KindMapping & { 1533 return this->lowerTy().getKindMap(); 1534 }; 1535 // Pointer-like types. 1536 if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy)) 1537 boxEleTy = eleTy; 1538 // Integer types. 1539 if (fir::isa_integer(boxEleTy)) { 1540 if (auto ty = boxEleTy.dyn_cast<mlir::IntegerType>()) 1541 return doInteger(ty.getWidth()); 1542 auto ty = boxEleTy.cast<fir::IntegerType>(); 1543 return doInteger(getKindMap().getIntegerBitsize(ty.getFKind())); 1544 } 1545 // Floating point types. 1546 if (fir::isa_real(boxEleTy)) { 1547 if (auto ty = boxEleTy.dyn_cast<mlir::FloatType>()) 1548 return doFloat(ty.getWidth()); 1549 auto ty = boxEleTy.cast<fir::RealType>(); 1550 return doFloat(getKindMap().getRealBitsize(ty.getFKind())); 1551 } 1552 // Complex types. 1553 if (fir::isa_complex(boxEleTy)) { 1554 if (auto ty = boxEleTy.dyn_cast<mlir::ComplexType>()) 1555 return doComplex( 1556 ty.getElementType().cast<mlir::FloatType>().getWidth()); 1557 auto ty = boxEleTy.cast<fir::ComplexType>(); 1558 return doComplex(getKindMap().getRealBitsize(ty.getFKind())); 1559 } 1560 // Character types. 1561 if (auto ty = boxEleTy.dyn_cast<fir::CharacterType>()) { 1562 auto charWidth = getKindMap().getCharacterBitsize(ty.getFKind()); 1563 if (ty.getLen() != fir::CharacterType::unknownLen()) { 1564 auto len = this->genConstantOffset(loc, rewriter, ty.getLen()); 1565 return doCharacter(charWidth, len); 1566 } 1567 assert(!lenParams.empty()); 1568 return doCharacter(charWidth, lenParams.back()); 1569 } 1570 // Logical type. 
1571 if (auto ty = boxEleTy.dyn_cast<fir::LogicalType>()) 1572 return doLogical(getKindMap().getLogicalBitsize(ty.getFKind())); 1573 // Array types. 1574 if (auto seqTy = boxEleTy.dyn_cast<fir::SequenceType>()) 1575 return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams); 1576 // Derived-type types. 1577 if (boxEleTy.isa<fir::RecordType>()) { 1578 auto ptrTy = mlir::LLVM::LLVMPointerType::get( 1579 this->lowerTy().convertType(boxEleTy)); 1580 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 1581 auto one = 1582 genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1); 1583 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, 1584 mlir::ValueRange{one}); 1585 auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>( 1586 loc, this->lowerTy().indexType(), gep); 1587 return {eleSize, 1588 this->genConstantOffset(loc, rewriter, fir::derivedToTypeCode())}; 1589 } 1590 // Reference type. 1591 if (fir::isa_ref_type(boxEleTy)) { 1592 // FIXME: use the target pointer size rather than sizeof(void*) 1593 return {this->genConstantOffset(loc, rewriter, sizeof(void *)), 1594 this->genConstantOffset(loc, rewriter, CFI_type_cptr)}; 1595 } 1596 fir::emitFatalError(loc, "unhandled type in fir.box code generation"); 1597 } 1598 1599 /// Basic pattern to write a field in the descriptor 1600 mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter, 1601 mlir::Location loc, mlir::Value dest, 1602 ArrayRef<unsigned> fldIndexes, mlir::Value value, 1603 bool bitcast = false) const { 1604 auto boxTy = dest.getType(); 1605 auto fldTy = this->getBoxEleTy(boxTy, fldIndexes); 1606 if (bitcast) 1607 value = rewriter.create<mlir::LLVM::BitcastOp>(loc, fldTy, value); 1608 else 1609 value = this->integerCast(loc, rewriter, fldTy, value); 1610 SmallVector<mlir::Attribute, 2> attrs; 1611 for (auto i : fldIndexes) 1612 attrs.push_back(rewriter.getI32IntegerAttr(i)); 1613 auto indexesAttr = mlir::ArrayAttr::get(rewriter.getContext(), attrs); 1614 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, boxTy, dest, value, 1615 indexesAttr); 1616 } 1617 1618 inline mlir::Value 1619 insertBaseAddress(mlir::ConversionPatternRewriter &rewriter, 1620 mlir::Location loc, mlir::Value dest, 1621 mlir::Value base) const { 1622 return insertField(rewriter, loc, dest, {kAddrPosInBox}, base, 1623 /*bitCast=*/true); 1624 } 1625 1626 inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter, 1627 mlir::Location loc, mlir::Value dest, 1628 unsigned dim, mlir::Value lb) const { 1629 return insertField(rewriter, loc, dest, 1630 {kDimsPosInBox, dim, kDimLowerBoundPos}, lb); 1631 } 1632 1633 inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter, 1634 mlir::Location loc, mlir::Value dest, 1635 unsigned dim, mlir::Value extent) const { 1636 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos}, 1637 extent); 1638 } 1639 1640 inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter, 1641 mlir::Location loc, mlir::Value dest, 1642 unsigned dim, mlir::Value stride) const { 1643 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos}, 1644 stride); 1645 } 1646 1647 /// Get the address of the type descriptor global variable that was created by 1648 /// lowering for derived type \p recType. 
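/// A minimal sketch of the lookup order implemented below: first a fir.global
/// with the frontend-mangled name, then an llvm.mlir.global if the global was
/// already converted, and otherwise a weak external i8 global is declared so
/// that an address can still be taken.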
1649 template <typename BOX>
1650 mlir::Value
1651 getTypeDescriptor(BOX box, mlir::ConversionPatternRewriter &rewriter,
1652 mlir::Location loc, fir::RecordType recType) const {
1653 std::string name = recType.translateNameToFrontendMangledName();
1654 auto module = box->template getParentOfType<mlir::ModuleOp>();
1655 if (auto global = module.template lookupSymbol<fir::GlobalOp>(name)) {
1656 auto ty = mlir::LLVM::LLVMPointerType::get(
1657 this->lowerTy().convertType(global.getType()));
1658 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
1659 global.getSymName());
1660 }
1661 if (auto global =
1662 module.template lookupSymbol<mlir::LLVM::GlobalOp>(name)) {
1663 // The global may have already been translated to LLVM.
1664 auto ty = mlir::LLVM::LLVMPointerType::get(global.getType());
1665 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
1666 global.getSymName());
1667 }
1668 // The global does not exist in the current translation unit, but may be
1669 // defined elsewhere (e.g., the type is defined in a module).
1670 // For now, create an extern_weak symbol (it will become nullptr if
1671 // unresolved) to support generating code without the front-end generated
1672 // symbols. These could be made available_externally to require the symbols
1673 // to be defined elsewhere and to cause a link-time failure otherwise.
1674 auto i8Ty = rewriter.getIntegerType(8);
1675 mlir::OpBuilder modBuilder(module.getBodyRegion());
1676 // TODO: These symbols should be lowered to constants in lowering; they are
1677 // read-only.
1678 modBuilder.create<mlir::LLVM::GlobalOp>(loc, i8Ty, /*isConstant=*/false,
1679 mlir::LLVM::Linkage::ExternWeak,
1680 name, mlir::Attribute{});
1681 auto ty = mlir::LLVM::LLVMPointerType::get(i8Ty);
1682 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, name);
1683 }
1684 
1685 template <typename BOX>
1686 std::tuple<fir::BoxType, mlir::Value, mlir::Value>
1687 consDescriptorPrefix(BOX box, mlir::ConversionPatternRewriter &rewriter,
1688 unsigned rank, mlir::ValueRange lenParams) const {
1689 auto loc = box.getLoc();
1690 auto boxTy = box.getType().template dyn_cast<fir::BoxType>();
1691 auto convTy = this->lowerTy().convertBoxType(boxTy, rank);
1692 auto llvmBoxPtrTy = convTy.template cast<mlir::LLVM::LLVMPointerType>();
1693 auto llvmBoxTy = llvmBoxPtrTy.getElementType();
1694 mlir::Value descriptor =
1695 rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy);
1696 
1697 llvm::SmallVector<mlir::Value> typeparams = lenParams;
1698 if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) {
1699 if (!box.substr().empty() && fir::hasDynamicSize(boxTy.getEleTy()))
1700 typeparams.push_back(box.substr()[1]);
1701 }
1702 
1703 // Write each of the fields with the appropriate values.
1704 auto [eleSize, cfiTy] =
1705 getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams);
1706 descriptor =
1707 insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize);
1708 descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox},
1709 this->genI32Constant(loc, rewriter, CFI_VERSION));
1710 descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox},
1711 this->genI32Constant(loc, rewriter, rank));
1712 descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy);
1713 descriptor =
1714 insertField(rewriter, loc, descriptor, {kAttributePosInBox},
1715 this->genI32Constant(loc, rewriter, getCFIAttr(boxTy)));
1716 const bool hasAddendum = isDerivedType(boxTy);
1717 descriptor =
1718 insertField(rewriter, loc, descriptor, {kF18AddendumPosInBox},
1719 
this->genI32Constant(loc, rewriter, hasAddendum ? 1 : 0));
1720 
1721 if (hasAddendum) {
1722 auto isArray =
1723 fir::dyn_cast_ptrOrBoxEleTy(boxTy).template isa<fir::SequenceType>();
1724 unsigned typeDescFieldId = isArray ? kOptTypePtrPosInBox : kDimsPosInBox;
1725 auto typeDesc =
1726 getTypeDescriptor(box, rewriter, loc, unwrapIfDerived(boxTy));
1727 descriptor =
1728 insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc,
1729 /*bitCast=*/true);
1730 }
1731 
1732 return {boxTy, descriptor, eleSize};
1733 }
1734 
1735 /// Compute the base address of a substring given the base address of a scalar
1736 /// string and the zero-based string lower bound.
1737 mlir::Value shiftSubstringBase(mlir::ConversionPatternRewriter &rewriter,
1738 mlir::Location loc, mlir::Value base,
1739 mlir::Value lowerBound) const {
1740 llvm::SmallVector<mlir::Value> gepOperands;
1741 auto baseType =
1742 base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType();
1743 if (baseType.isa<mlir::LLVM::LLVMArrayType>()) {
1744 auto idxTy = this->lowerTy().indexType();
1745 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
1746 gepOperands.push_back(zero);
1747 }
1748 gepOperands.push_back(lowerBound);
1749 return this->genGEP(loc, base.getType(), rewriter, base, gepOperands);
1750 }
1751 
1752 /// If the embox is not in a globalOp body, allocate storage for the box,
1753 /// store the value inside, and return the generated alloca. Return the input
1754 /// value otherwise.
1755 mlir::Value
1756 placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter,
1757 mlir::Location loc, mlir::Value boxValue) const {
1758 auto *thisBlock = rewriter.getInsertionBlock();
1759 if (thisBlock && mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp()))
1760 return boxValue;
1761 auto boxPtrTy = mlir::LLVM::LLVMPointerType::get(boxValue.getType());
1762 auto alloca = genAllocaWithType(loc, boxPtrTy, defaultAlign, rewriter);
1763 rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, alloca);
1764 return alloca;
1765 }
1766 };
1767 
1768 /// Compute the extent of a triplet slice (lb:ub:step).
1769 static mlir::Value
1770 computeTripletExtent(mlir::ConversionPatternRewriter &rewriter,
1771 mlir::Location loc, mlir::Value lb, mlir::Value ub,
1772 mlir::Value step, mlir::Value zero, mlir::Type type) {
1773 mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb);
1774 extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step);
1775 extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step);
1776 // If the resulting extent is negative (`ub-lb` and `step` have different
1777 // signs), zero must be returned instead.
1778 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
1779 loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero);
1780 return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero);
1781 }
1782 
1783 /// Create a generic box on a memory reference. This conversion lowers the
1784 /// abstract box to the appropriate, initialized descriptor.
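/// For example (schematic only; the descriptor layout follows CFI_cdesc_t in
/// flang/ISO_Fortran_binding.h):
///   %box = fir.embox %addr : (!fir.ref<i32>) -> !fir.box<i32>
/// becomes an llvm.mlir.undef of the descriptor struct and a chain of
/// llvm.insertvalue filling elem_len, version, rank, type, attribute, the
/// addendum flag and finally base_addr, stored to a stack slot unless the
/// embox appears in a global initializer.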
1785 struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> { 1786 using EmboxCommonConversion::EmboxCommonConversion; 1787 1788 mlir::LogicalResult 1789 matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor, 1790 mlir::ConversionPatternRewriter &rewriter) const override { 1791 assert(!embox.getShape() && "There should be no dims on this embox op"); 1792 auto [boxTy, dest, eleSize] = 1793 consDescriptorPrefix(embox, rewriter, /*rank=*/0, 1794 /*lenParams=*/adaptor.getOperands().drop_front(1)); 1795 dest = insertBaseAddress(rewriter, embox.getLoc(), dest, 1796 adaptor.getOperands()[0]); 1797 if (isDerivedTypeWithLenParams(boxTy)) { 1798 TODO(embox.getLoc(), 1799 "fir.embox codegen of derived with length parameters"); 1800 return failure(); 1801 } 1802 auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest); 1803 rewriter.replaceOp(embox, result); 1804 return success(); 1805 } 1806 }; 1807 1808 /// Lower `fir.emboxproc` operation. Creates a procedure box. 1809 /// TODO: Part of supporting Fortran 2003 procedure pointers. 1810 struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> { 1811 using FIROpConversion::FIROpConversion; 1812 1813 mlir::LogicalResult 1814 matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor, 1815 mlir::ConversionPatternRewriter &rewriter) const override { 1816 TODO(emboxproc.getLoc(), "fir.emboxproc codegen"); 1817 return failure(); 1818 } 1819 }; 1820 1821 /// Create a generic box on a memory reference. 1822 struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> { 1823 using EmboxCommonConversion::EmboxCommonConversion; 1824 1825 mlir::LogicalResult 1826 matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor, 1827 mlir::ConversionPatternRewriter &rewriter) const override { 1828 auto [boxTy, dest, eleSize] = consDescriptorPrefix( 1829 xbox, rewriter, xbox.getOutRank(), 1830 adaptor.getOperands().drop_front(xbox.lenParamOffset())); 1831 // Generate the triples in the dims field of the descriptor 1832 mlir::ValueRange operands = adaptor.getOperands(); 1833 auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64); 1834 mlir::Value base = operands[0]; 1835 assert(!xbox.shape().empty() && "must have a shape"); 1836 unsigned shapeOffset = xbox.shapeOffset(); 1837 bool hasShift = !xbox.shift().empty(); 1838 unsigned shiftOffset = xbox.shiftOffset(); 1839 bool hasSlice = !xbox.slice().empty(); 1840 unsigned sliceOffset = xbox.sliceOffset(); 1841 mlir::Location loc = xbox.getLoc(); 1842 mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0); 1843 mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1); 1844 mlir::Value prevDim = integerCast(loc, rewriter, i64Ty, eleSize); 1845 mlir::Value prevPtrOff = one; 1846 mlir::Type eleTy = boxTy.getEleTy(); 1847 const unsigned rank = xbox.getRank(); 1848 llvm::SmallVector<mlir::Value> gepArgs; 1849 unsigned constRows = 0; 1850 mlir::Value ptrOffset = zero; 1851 if (auto memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType())) 1852 if (auto seqTy = memEleTy.dyn_cast<fir::SequenceType>()) { 1853 mlir::Type seqEleTy = seqTy.getEleTy(); 1854 // Adjust the element scaling factor if the element is a dependent type. 
1855 if (fir::hasDynamicSize(seqEleTy)) { 1856 if (fir::isa_char(seqEleTy)) { 1857 assert(xbox.lenParams().size() == 1); 1858 prevPtrOff = integerCast(loc, rewriter, i64Ty, 1859 operands[xbox.lenParamOffset()]); 1860 } else if (seqEleTy.isa<fir::RecordType>()) { 1861 TODO(loc, "generate call to calculate size of PDT"); 1862 } else { 1863 return rewriter.notifyMatchFailure(xbox, "unexpected dynamic type"); 1864 } 1865 } else { 1866 constRows = seqTy.getConstantRows(); 1867 } 1868 } 1869 1870 bool hasSubcomp = !xbox.subcomponent().empty(); 1871 mlir::Value stepExpr; 1872 if (hasSubcomp) { 1873 // We have a subcomponent. The step value needs to be the number of 1874 // bytes per element (which is a derived type). 1875 mlir::Type ty0 = base.getType(); 1876 [[maybe_unused]] auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 1877 assert(ptrTy && "expected pointer type"); 1878 mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()); 1879 assert(memEleTy && "expected fir pointer type"); 1880 auto seqTy = memEleTy.dyn_cast<fir::SequenceType>(); 1881 assert(seqTy && "expected sequence type"); 1882 mlir::Type seqEleTy = seqTy.getEleTy(); 1883 auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy)); 1884 stepExpr = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter); 1885 } 1886 1887 // Process the array subspace arguments (shape, shift, etc.), if any, 1888 // translating everything to values in the descriptor wherever the entity 1889 // has a dynamic array dimension. 1890 for (unsigned di = 0, descIdx = 0; di < rank; ++di) { 1891 mlir::Value extent = operands[shapeOffset]; 1892 mlir::Value outerExtent = extent; 1893 bool skipNext = false; 1894 if (hasSlice) { 1895 mlir::Value off = operands[sliceOffset]; 1896 mlir::Value adj = one; 1897 if (hasShift) 1898 adj = operands[shiftOffset]; 1899 auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj); 1900 if (constRows > 0) { 1901 gepArgs.push_back(ao); 1902 --constRows; 1903 } else { 1904 auto dimOff = 1905 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff); 1906 ptrOffset = 1907 rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset); 1908 } 1909 if (mlir::isa_and_nonnull<fir::UndefOp>( 1910 xbox.slice()[3 * di + 1].getDefiningOp())) { 1911 // This dimension contains a scalar expression in the array slice op. 1912 // The dimension is loop invariant, will be dropped, and will not 1913 // appear in the descriptor. 1914 skipNext = true; 1915 } 1916 } 1917 if (!skipNext) { 1918 // store lower bound (normally 0) 1919 mlir::Value lb = zero; 1920 if (eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>()) { 1921 lb = one; 1922 if (hasShift) 1923 lb = operands[shiftOffset]; 1924 } 1925 dest = insertLowerBound(rewriter, loc, dest, descIdx, lb); 1926 1927 // store extent 1928 if (hasSlice) 1929 extent = computeTripletExtent(rewriter, loc, operands[sliceOffset], 1930 operands[sliceOffset + 1], 1931 operands[sliceOffset + 2], zero, i64Ty); 1932 dest = insertExtent(rewriter, loc, dest, descIdx, extent); 1933 1934 // store step (scaled by shaped extent) 1935 1936 mlir::Value step = hasSubcomp ? 
stepExpr : prevDim;
1937 if (hasSlice)
1938 step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step,
1939 operands[sliceOffset + 2]);
1940 dest = insertStride(rewriter, loc, dest, descIdx, step);
1941 ++descIdx;
1942 }
1943 
1944 // compute the stride and offset for the next natural dimension
1945 prevDim =
1946 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevDim, outerExtent);
1947 if (constRows == 0)
1948 prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff,
1949 outerExtent);
1950 
1951 // increment iterators
1952 ++shapeOffset;
1953 if (hasShift)
1954 ++shiftOffset;
1955 if (hasSlice)
1956 sliceOffset += 3;
1957 }
1958 if (hasSlice || hasSubcomp || !xbox.substr().empty()) {
1959 llvm::SmallVector<mlir::Value> args = {ptrOffset};
1960 args.append(gepArgs.rbegin(), gepArgs.rend());
1961 if (hasSubcomp) {
1962 // For each field in the path, add the offset to base via the args list.
1963 // In the most general case, some offsets must be computed since
1964 // they are not known until runtime.
1965 if (fir::hasDynamicSize(fir::unwrapSequenceType(
1966 fir::unwrapPassByRefType(xbox.memref().getType()))))
1967 TODO(loc, "fir.embox codegen dynamic size component in derived type");
1968 args.append(operands.begin() + xbox.subcomponentOffset(),
1969 operands.begin() + xbox.subcomponentOffset() +
1970 xbox.subcomponent().size());
1971 }
1972 base =
1973 rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args);
1974 if (!xbox.substr().empty())
1975 base = shiftSubstringBase(rewriter, loc, base,
1976 operands[xbox.substrOffset()]);
1977 }
1978 dest = insertBaseAddress(rewriter, loc, dest, base);
1979 if (isDerivedTypeWithLenParams(boxTy))
1980 TODO(loc, "fir.embox codegen of derived with length parameters");
1981 
1982 mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest);
1983 rewriter.replaceOp(xbox, result);
1984 return success();
1985 }
1986 };
1987 
1988 /// Create a new box given a box reference.
1989 struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> {
1990 using EmboxCommonConversion::EmboxCommonConversion;
1991 
1992 mlir::LogicalResult
1993 matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor,
1994 mlir::ConversionPatternRewriter &rewriter) const override {
1995 mlir::Location loc = rebox.getLoc();
1996 mlir::Type idxTy = lowerTy().indexType();
1997 mlir::Value loweredBox = adaptor.getOperands()[0];
1998 mlir::ValueRange operands = adaptor.getOperands();
1999 
2000 // Create new descriptor and fill its non-shape related data.
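// (For character entities, the element size read from the input box is in
// bytes; when the kind is not 1 it is divided below by the kind value, i.e.
// the per-character byte width, to recover the length in characters.)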
2001 llvm::SmallVector<mlir::Value, 2> lenParams; 2002 mlir::Type inputEleTy = getInputEleTy(rebox); 2003 if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) { 2004 mlir::Value len = 2005 loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter); 2006 if (charTy.getFKind() != 1) { 2007 mlir::Value width = 2008 genConstantIndex(loc, idxTy, rewriter, charTy.getFKind()); 2009 len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width); 2010 } 2011 lenParams.emplace_back(len); 2012 } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) { 2013 if (recTy.getNumLenParams() != 0) 2014 TODO(loc, "reboxing descriptor of derived type with length parameters"); 2015 } 2016 auto [boxTy, dest, eleSize] = 2017 consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams); 2018 2019 // Read input extents, strides, and base address 2020 llvm::SmallVector<mlir::Value> inputExtents; 2021 llvm::SmallVector<mlir::Value> inputStrides; 2022 const unsigned inputRank = rebox.getRank(); 2023 for (unsigned i = 0; i < inputRank; ++i) { 2024 mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i); 2025 SmallVector<mlir::Value, 3> dimInfo = 2026 getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter); 2027 inputExtents.emplace_back(dimInfo[1]); 2028 inputStrides.emplace_back(dimInfo[2]); 2029 } 2030 2031 mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType()); 2032 mlir::Value baseAddr = 2033 loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter); 2034 2035 if (!rebox.slice().empty() || !rebox.subcomponent().empty()) 2036 return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides, 2037 operands, rewriter); 2038 return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides, 2039 operands, rewriter); 2040 } 2041 2042 private: 2043 /// Write resulting shape and base address in descriptor, and replace rebox 2044 /// op. 2045 mlir::LogicalResult 2046 finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 2047 mlir::ValueRange lbounds, mlir::ValueRange extents, 2048 mlir::ValueRange strides, 2049 mlir::ConversionPatternRewriter &rewriter) const { 2050 mlir::Location loc = rebox.getLoc(); 2051 mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1); 2052 for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) { 2053 unsigned dim = iter.index(); 2054 mlir::Value lb = lbounds.empty() ? one : lbounds[dim]; 2055 dest = insertLowerBound(rewriter, loc, dest, dim, lb); 2056 dest = insertExtent(rewriter, loc, dest, dim, std::get<0>(iter.value())); 2057 dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value())); 2058 } 2059 dest = insertBaseAddress(rewriter, loc, dest, base); 2060 mlir::Value result = 2061 placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest); 2062 rewriter.replaceOp(rebox, result); 2063 return success(); 2064 } 2065 2066 // Apply slice given the base address, extents and strides of the input box. 2067 mlir::LogicalResult 2068 sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 2069 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 2070 mlir::ValueRange operands, 2071 mlir::ConversionPatternRewriter &rewriter) const { 2072 mlir::Location loc = rebox.getLoc(); 2073 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 2074 mlir::Type idxTy = lowerTy().indexType(); 2075 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2076 // Apply subcomponent and substring shift on base address. 
2077 if (!rebox.subcomponent().empty() || !rebox.substr().empty()) { 2078 // Cast to inputEleTy* so that a GEP can be used. 2079 mlir::Type inputEleTy = getInputEleTy(rebox); 2080 auto llvmElePtrTy = 2081 mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy)); 2082 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base); 2083 2084 if (!rebox.subcomponent().empty()) { 2085 llvm::SmallVector<mlir::Value> gepOperands = {zero}; 2086 for (unsigned i = 0; i < rebox.subcomponent().size(); ++i) 2087 gepOperands.push_back(operands[rebox.subcomponentOffset() + i]); 2088 base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands); 2089 } 2090 if (!rebox.substr().empty()) 2091 base = shiftSubstringBase(rewriter, loc, base, 2092 operands[rebox.substrOffset()]); 2093 } 2094 2095 if (rebox.slice().empty()) 2096 // The array section is of the form array[%component][substring], keep 2097 // the input array extents and strides. 2098 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 2099 inputExtents, inputStrides, rewriter); 2100 2101 // Strides from the fir.box are in bytes. 2102 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2103 2104 // The slice is of the form array(i:j:k)[%component]. Compute new extents 2105 // and strides. 2106 llvm::SmallVector<mlir::Value> slicedExtents; 2107 llvm::SmallVector<mlir::Value> slicedStrides; 2108 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2109 const bool sliceHasOrigins = !rebox.shift().empty(); 2110 unsigned sliceOps = rebox.sliceOffset(); 2111 unsigned shiftOps = rebox.shiftOffset(); 2112 auto strideOps = inputStrides.begin(); 2113 const unsigned inputRank = inputStrides.size(); 2114 for (unsigned i = 0; i < inputRank; 2115 ++i, ++strideOps, ++shiftOps, sliceOps += 3) { 2116 mlir::Value sliceLb = 2117 integerCast(loc, rewriter, idxTy, operands[sliceOps]); 2118 mlir::Value inputStride = *strideOps; // already idxTy 2119 // Apply origin shift: base += (lb-shift)*input_stride 2120 mlir::Value sliceOrigin = 2121 sliceHasOrigins 2122 ? integerCast(loc, rewriter, idxTy, operands[shiftOps]) 2123 : one; 2124 mlir::Value diff = 2125 rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin); 2126 mlir::Value offset = 2127 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride); 2128 base = genGEP(loc, voidPtrTy, rewriter, base, offset); 2129 // Apply upper bound and step if this is a triplet. Otherwise, the 2130 // dimension is dropped and no extents/strides are computed. 2131 mlir::Value upper = operands[sliceOps + 1]; 2132 const bool isTripletSlice = 2133 !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp()); 2134 if (isTripletSlice) { 2135 mlir::Value step = 2136 integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]); 2137 // extent = ub-lb+step/step 2138 mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper); 2139 mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb, 2140 sliceUb, step, zero, idxTy); 2141 slicedExtents.emplace_back(extent); 2142 // stride = step*input_stride 2143 mlir::Value stride = 2144 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride); 2145 slicedStrides.emplace_back(stride); 2146 } 2147 } 2148 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 2149 slicedExtents, slicedStrides, rewriter); 2150 } 2151 2152 /// Apply a new shape to the data described by a box given the base address, 2153 /// extents and strides of the box. 
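/// For example, reboxing a contiguous rank-1 entity with a new 2-D shape
/// keeps stride(0) from the input box and derives
/// stride(1) = extent(0) * stride(0), as done in the loop below.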
2154 mlir::LogicalResult 2155 reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 2156 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 2157 mlir::ValueRange operands, 2158 mlir::ConversionPatternRewriter &rewriter) const { 2159 mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(), 2160 operands.begin() + rebox.shiftOffset() + 2161 rebox.shift().size()}; 2162 if (rebox.shape().empty()) { 2163 // Only setting new lower bounds. 2164 return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents, 2165 inputStrides, rewriter); 2166 } 2167 2168 mlir::Location loc = rebox.getLoc(); 2169 // Strides from the fir.box are in bytes. 2170 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 2171 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2172 2173 llvm::SmallVector<mlir::Value> newStrides; 2174 llvm::SmallVector<mlir::Value> newExtents; 2175 mlir::Type idxTy = lowerTy().indexType(); 2176 // First stride from input box is kept. The rest is assumed contiguous 2177 // (it is not possible to reshape otherwise). If the input is scalar, 2178 // which may be OK if all new extents are ones, the stride does not 2179 // matter, use one. 2180 mlir::Value stride = inputStrides.empty() 2181 ? genConstantIndex(loc, idxTy, rewriter, 1) 2182 : inputStrides[0]; 2183 for (unsigned i = 0; i < rebox.shape().size(); ++i) { 2184 mlir::Value rawExtent = operands[rebox.shapeOffset() + i]; 2185 mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent); 2186 newExtents.emplace_back(extent); 2187 newStrides.emplace_back(stride); 2188 // nextStride = extent * stride; 2189 stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride); 2190 } 2191 return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides, 2192 rewriter); 2193 } 2194 2195 /// Return scalar element type of the input box. 2196 static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) { 2197 auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType()); 2198 if (auto seqTy = ty.dyn_cast<fir::SequenceType>()) 2199 return seqTy.getEleTy(); 2200 return ty; 2201 } 2202 }; 2203 2204 // Code shared between insert_value and extract_value Ops. 2205 struct ValueOpCommon { 2206 // Translate the arguments pertaining to any multidimensional array to 2207 // row-major order for LLVM-IR. 
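// For example, the coordinate pair {i, j} addressing a rank-2 fir array is
// reversed to {j, i} so that it indexes the row-major !llvm.array<...>
// nesting produced by type conversion.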
2208 static void toRowMajor(SmallVectorImpl<mlir::Attribute> &attrs, 2209 mlir::Type ty) { 2210 assert(ty && "type is null"); 2211 const auto end = attrs.size(); 2212 for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) { 2213 if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 2214 const auto dim = getDimension(seq); 2215 if (dim > 1) { 2216 auto ub = std::min(i + dim, end); 2217 std::reverse(attrs.begin() + i, attrs.begin() + ub); 2218 i += dim - 1; 2219 } 2220 ty = getArrayElementType(seq); 2221 } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) { 2222 ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()]; 2223 } else { 2224 llvm_unreachable("index into invalid type"); 2225 } 2226 } 2227 } 2228 2229 static llvm::SmallVector<mlir::Attribute> 2230 collectIndices(mlir::ConversionPatternRewriter &rewriter, 2231 mlir::ArrayAttr arrAttr) { 2232 llvm::SmallVector<mlir::Attribute> attrs; 2233 for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) { 2234 if (i->isa<mlir::IntegerAttr>()) { 2235 attrs.push_back(*i); 2236 } else { 2237 auto fieldName = i->cast<mlir::StringAttr>().getValue(); 2238 ++i; 2239 auto ty = i->cast<mlir::TypeAttr>().getValue(); 2240 auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName); 2241 attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index)); 2242 } 2243 } 2244 return attrs; 2245 } 2246 2247 private: 2248 static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) { 2249 unsigned result = 1; 2250 for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>(); 2251 eleTy; 2252 eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>()) 2253 ++result; 2254 return result; 2255 } 2256 2257 static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) { 2258 auto eleTy = ty.getElementType(); 2259 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2260 eleTy = arrTy.getElementType(); 2261 return eleTy; 2262 } 2263 }; 2264 2265 namespace { 2266 /// Extract a subobject value from an ssa-value of aggregate type 2267 struct ExtractValueOpConversion 2268 : public FIROpAndTypeConversion<fir::ExtractValueOp>, 2269 public ValueOpCommon { 2270 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2271 2272 mlir::LogicalResult 2273 doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor, 2274 mlir::ConversionPatternRewriter &rewriter) const override { 2275 auto attrs = collectIndices(rewriter, extractVal.coor()); 2276 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 2277 auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs); 2278 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>( 2279 extractVal, ty, adaptor.getOperands()[0], position); 2280 return success(); 2281 } 2282 }; 2283 2284 /// InsertValue is the generalized instruction for the composition of new 2285 /// aggregate type values. 
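/// Field names in the coordinate are resolved to integer positions with
/// fir::RecordType::getFieldIndex, and array coordinates are reordered to
/// row-major, before a single llvm.insertvalue is emitted.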
2286 struct InsertValueOpConversion
2287 : public FIROpAndTypeConversion<fir::InsertValueOp>,
2288 public ValueOpCommon {
2289 using FIROpAndTypeConversion::FIROpAndTypeConversion;
2290 
2291 mlir::LogicalResult
2292 doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor,
2293 mlir::ConversionPatternRewriter &rewriter) const override {
2294 auto attrs = collectIndices(rewriter, insertVal.coor());
2295 toRowMajor(attrs, adaptor.getOperands()[0].getType());
2296 auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs);
2297 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
2298 insertVal, ty, adaptor.getOperands()[0], adaptor.getOperands()[1],
2299 position);
2300 return success();
2301 }
2302 };
2303 
2304 /// InsertOnRange inserts a value into a sequence over a range of offsets.
2305 struct InsertOnRangeOpConversion
2306 : public FIROpAndTypeConversion<fir::InsertOnRangeOp> {
2307 using FIROpAndTypeConversion::FIROpAndTypeConversion;
2308 
2309 // Increments an array of subscripts in a row-major fashion.
2310 void incrementSubscripts(const SmallVector<uint64_t> &dims,
2311 SmallVector<uint64_t> &subscripts) const {
2312 for (size_t i = dims.size(); i > 0; --i) {
2313 if (++subscripts[i - 1] < dims[i - 1]) {
2314 return;
2315 }
2316 subscripts[i - 1] = 0;
2317 }
2318 }
2319 
2320 mlir::LogicalResult
2321 doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor,
2322 mlir::ConversionPatternRewriter &rewriter) const override {
2323 
2324 llvm::SmallVector<uint64_t> dims;
2325 auto type = adaptor.getOperands()[0].getType();
2326 
2327 // Iteratively extract the array dimensions from the type.
2328 while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
2329 dims.push_back(t.getNumElements());
2330 type = t.getElementType();
2331 }
2332 
2333 SmallVector<uint64_t> lBounds;
2334 SmallVector<uint64_t> uBounds;
2335 
2336 // Unzip the upper and lower bounds and convert to a row-major format.
2337 mlir::DenseIntElementsAttr coor = range.coor();
2338 auto reversedCoor = llvm::reverse(coor.getValues<int64_t>());
2339 for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) {
2340 uBounds.push_back(*i++);
2341 lBounds.push_back(*i);
2342 }
2343 
2344 auto &subscripts = lBounds;
2345 auto loc = range.getLoc();
2346 mlir::Value lastOp = adaptor.getOperands()[0];
2347 mlir::Value insertVal = adaptor.getOperands()[1];
2348 
2349 auto i64Ty = rewriter.getI64Type();
2350 while (subscripts != uBounds) {
2351 // Convert the uint64_t subscripts to Attributes.
2352 SmallVector<mlir::Attribute> subscriptAttrs;
2353 for (const auto &subscript : subscripts)
2354 subscriptAttrs.push_back(IntegerAttr::get(i64Ty, subscript));
2355 lastOp = rewriter.create<mlir::LLVM::InsertValueOp>(
2356 loc, ty, lastOp, insertVal,
2357 ArrayAttr::get(range.getContext(), subscriptAttrs));
2358 
2359 incrementSubscripts(dims, subscripts);
2360 }
2361 
2362 // Convert the uint64_t subscripts to Attributes.
2363 SmallVector<mlir::Attribute> subscriptAttrs;
2364 for (const auto &subscript : subscripts)
2365 subscriptAttrs.push_back(
2366 IntegerAttr::get(rewriter.getI64Type(), subscript));
2367 mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs);
2368 
2369 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
2370 range, ty, lastOp, insertVal,
2371 ArrayAttr::get(range.getContext(), arrayRef));
2372 
2373 return success();
2374 }
2375 };
2376 } // namespace
2377 
2378 /// XArrayCoor is the address arithmetic on a dynamically shaped, sliced,
2379 /// shifted etc. array.
2380 /// (See the static restriction on coordinate_of.) array_coor determines the 2381 /// coordinate (location) of a specific element. 2382 struct XArrayCoorOpConversion 2383 : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> { 2384 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2385 2386 mlir::LogicalResult 2387 doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor, 2388 mlir::ConversionPatternRewriter &rewriter) const override { 2389 auto loc = coor.getLoc(); 2390 mlir::ValueRange operands = adaptor.getOperands(); 2391 unsigned rank = coor.getRank(); 2392 assert(coor.indices().size() == rank); 2393 assert(coor.shape().empty() || coor.shape().size() == rank); 2394 assert(coor.shift().empty() || coor.shift().size() == rank); 2395 assert(coor.slice().empty() || coor.slice().size() == 3 * rank); 2396 mlir::Type idxTy = lowerTy().indexType(); 2397 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2398 mlir::Value prevExt = one; 2399 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2400 mlir::Value offset = zero; 2401 const bool isShifted = !coor.shift().empty(); 2402 const bool isSliced = !coor.slice().empty(); 2403 const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>(); 2404 2405 auto indexOps = coor.indices().begin(); 2406 auto shapeOps = coor.shape().begin(); 2407 auto shiftOps = coor.shift().begin(); 2408 auto sliceOps = coor.slice().begin(); 2409 // For each dimension of the array, generate the offset calculation. 2410 for (unsigned i = 0; i < rank; 2411 ++i, ++indexOps, ++shapeOps, ++shiftOps, sliceOps += 3) { 2412 mlir::Value index = 2413 integerCast(loc, rewriter, idxTy, operands[coor.indicesOffset() + i]); 2414 mlir::Value lb = isShifted ? integerCast(loc, rewriter, idxTy, 2415 operands[coor.shiftOffset() + i]) 2416 : one; 2417 mlir::Value step = one; 2418 bool normalSlice = isSliced; 2419 // Compute zero based index in dimension i of the element, applying 2420 // potential triplets and lower bounds. 2421 if (isSliced) { 2422 mlir::Value ub = *(sliceOps + 1); 2423 normalSlice = !mlir::isa_and_nonnull<fir::UndefOp>(ub.getDefiningOp()); 2424 if (normalSlice) 2425 step = integerCast(loc, rewriter, idxTy, *(sliceOps + 2)); 2426 } 2427 auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb); 2428 mlir::Value diff = 2429 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step); 2430 if (normalSlice) { 2431 mlir::Value sliceLb = 2432 integerCast(loc, rewriter, idxTy, operands[coor.sliceOffset() + i]); 2433 auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb); 2434 diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj); 2435 } 2436 // Update the offset given the stride and the zero based index `diff` 2437 // that was just computed. 2438 if (baseIsBoxed) { 2439 // Use stride in bytes from the descriptor. 2440 mlir::Value stride = 2441 loadStrideFromBox(loc, adaptor.getOperands()[0], i, rewriter); 2442 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride); 2443 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2444 } else { 2445 // Use stride computed at last iteration. 2446 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt); 2447 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2448 // Compute next stride assuming contiguity of the base array 2449 // (in element number). 
2450 auto nextExt =
2451 integerCast(loc, rewriter, idxTy, operands[coor.shapeOffset() + i]);
2452 prevExt =
2453 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt);
2454 }
2455 }
2456 
2457 // Add the computed offset to the base address.
2458 if (baseIsBoxed) {
2459 // Working with byte offsets. The base address is read from the fir.box
2460 // and needs to be cast to i8* to do the pointer arithmetic.
2461 mlir::Type baseTy =
2462 getBaseAddrTypeFromBox(adaptor.getOperands()[0].getType());
2463 mlir::Value base =
2464 loadBaseAddrFromBox(loc, baseTy, adaptor.getOperands()[0], rewriter);
2465 mlir::Type voidPtrTy = getVoidPtrType();
2466 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
2467 llvm::SmallVector<mlir::Value> args{offset};
2468 auto addr =
2469 rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args);
2470 if (coor.subcomponent().empty()) {
2471 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, baseTy, addr);
2472 return success();
2473 }
2474 auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr);
2475 args.clear();
2476 args.push_back(zero);
2477 if (!coor.lenParams().empty()) {
2478 // If type parameters are present, then we don't want to use a GEPOp
2479 // as below, as the LLVM struct type cannot be statically defined.
2480 TODO(loc, "derived type with type parameters");
2481 }
2482 // TODO: array offset subcomponents must be converted to LLVM's
2483 // row-major layout here.
2484 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
2485 args.push_back(operands[i]);
2486 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, baseTy, casted,
2487 args);
2488 return success();
2489 }
2490 
2491 // The array was not boxed, so it must be contiguous. offset is therefore an
2492 // element offset, and the base type is kept in the GEP unless the element
2493 // type size is itself dynamic.
2494 mlir::Value base;
2495 if (coor.subcomponent().empty()) {
2496 // No subcomponent.
2497 if (!coor.lenParams().empty()) {
2498 // Type parameters. Adjust the element size explicitly.
2499 auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType());
2500 assert(eleTy && "result must be a reference-like type");
2501 if (fir::characterWithDynamicLen(eleTy)) {
2502 assert(coor.lenParams().size() == 1);
2503 auto bitsInChar = lowerTy().getKindMap().getCharacterBitsize(
2504 eleTy.cast<fir::CharacterType>().getFKind());
2505 auto scaling = genConstantIndex(loc, idxTy, rewriter, bitsInChar / 8);
2506 auto scaledBySize =
2507 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, scaling);
2508 auto length =
2509 integerCast(loc, rewriter, idxTy,
2510 adaptor.getOperands()[coor.lenParamsOffset()]);
2511 offset = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, scaledBySize,
2512 length);
2513 } else {
2514 TODO(loc, "compute size of derived type with type parameters");
2515 }
2516 }
2517 // Cast the base address to a pointer to T.
2518 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty,
2519 adaptor.getOperands()[0]);
2520 } else {
2521 // Operand #0 must have a pointer type. For subcomponent slicing, we
2522 // want to cast away the array type and have a plain struct type.
2523 mlir::Type ty0 = adaptor.getOperands()[0].getType(); 2524 auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 2525 assert(ptrTy && "expected pointer type"); 2526 mlir::Type eleTy = ptrTy.getElementType(); 2527 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2528 eleTy = arrTy.getElementType(); 2529 auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy); 2530 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy, 2531 adaptor.getOperands()[0]); 2532 } 2533 SmallVector<mlir::Value> args = {offset}; 2534 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2535 args.push_back(operands[i]); 2536 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args); 2537 return success(); 2538 } 2539 }; 2540 2541 // 2542 // Primitive operations on Complex types 2543 // 2544 2545 /// Generate inline code for complex addition/subtraction 2546 template <typename LLVMOP, typename OPTY> 2547 static mlir::LLVM::InsertValueOp 2548 complexSum(OPTY sumop, mlir::ValueRange opnds, 2549 mlir::ConversionPatternRewriter &rewriter, 2550 fir::LLVMTypeConverter &lowering) { 2551 mlir::Value a = opnds[0]; 2552 mlir::Value b = opnds[1]; 2553 auto loc = sumop.getLoc(); 2554 auto ctx = sumop.getContext(); 2555 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 2556 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 2557 mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType())); 2558 mlir::Type ty = lowering.convertType(sumop.getType()); 2559 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 2560 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 2561 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 2562 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 2563 auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1); 2564 auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1); 2565 auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 2566 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0); 2567 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1); 2568 } 2569 2570 namespace { 2571 struct AddcOpConversion : public FIROpConversion<fir::AddcOp> { 2572 using FIROpConversion::FIROpConversion; 2573 2574 mlir::LogicalResult 2575 matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor, 2576 mlir::ConversionPatternRewriter &rewriter) const override { 2577 // given: (x + iy) + (x' + iy') 2578 // result: (x + x') + i(y + y') 2579 auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(), 2580 rewriter, lowerTy()); 2581 rewriter.replaceOp(addc, r.getResult()); 2582 return success(); 2583 } 2584 }; 2585 2586 struct SubcOpConversion : public FIROpConversion<fir::SubcOp> { 2587 using FIROpConversion::FIROpConversion; 2588 2589 mlir::LogicalResult 2590 matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor, 2591 mlir::ConversionPatternRewriter &rewriter) const override { 2592 // given: (x + iy) - (x' + iy') 2593 // result: (x - x') + i(y - y') 2594 auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(), 2595 rewriter, lowerTy()); 2596 rewriter.replaceOp(subc, r.getResult()); 2597 return success(); 2598 } 2599 }; 2600 2601 /// Inlined complex multiply 2602 struct MulcOpConversion : public FIROpConversion<fir::MulcOp> { 2603 using FIROpConversion::FIROpConversion; 2604 2605 mlir::LogicalResult 2606 matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor, 2607 mlir::ConversionPatternRewriter 
&rewriter) const override { 2608 // TODO: Can we use a call to __muldc3 ? 2609 // given: (x + iy) * (x' + iy') 2610 // result: (xx'-yy')+i(xy'+yx') 2611 mlir::Value a = adaptor.getOperands()[0]; 2612 mlir::Value b = adaptor.getOperands()[1]; 2613 auto loc = mulc.getLoc(); 2614 auto *ctx = mulc.getContext(); 2615 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 2616 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 2617 mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType())); 2618 mlir::Type ty = convertType(mulc.getType()); 2619 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 2620 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 2621 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 2622 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 2623 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 2624 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 2625 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 2626 auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx); 2627 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 2628 auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy); 2629 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 2630 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 2631 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 2632 rewriter.replaceOp(mulc, r0.getResult()); 2633 return success(); 2634 } 2635 }; 2636 2637 /// Inlined complex division 2638 struct DivcOpConversion : public FIROpConversion<fir::DivcOp> { 2639 using FIROpConversion::FIROpConversion; 2640 2641 mlir::LogicalResult 2642 matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor, 2643 mlir::ConversionPatternRewriter &rewriter) const override { 2644 // TODO: Can we use a call to __divdc3 instead? 2645 // Just generate inline code for now. 
2646 // given: (x + iy) / (x' + iy')
2647 // result: ((xx'+yy')/d) + i((yx'-xy')/d) where d = x'x' + y'y'
2648 mlir::Value a = adaptor.getOperands()[0];
2649 mlir::Value b = adaptor.getOperands()[1];
2650 auto loc = divc.getLoc();
2651 auto *ctx = divc.getContext();
2652 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
2653 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
2654 mlir::Type eleTy = convertType(getComplexEleTy(divc.getType()));
2655 mlir::Type ty = convertType(divc.getType());
2656 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
2657 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
2658 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
2659 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
2660 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
2661 auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1);
2662 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
2663 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
2664 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
2665 auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1);
2666 auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1);
2667 auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy);
2668 auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy);
2669 auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d);
2670 auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d);
2671 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
2672 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
2673 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
2674 rewriter.replaceOp(divc, r0.getResult());
2675 return success();
2676 }
2677 };
2678 
2679 /// Inlined complex negation
2680 struct NegcOpConversion : public FIROpConversion<fir::NegcOp> {
2681 using FIROpConversion::FIROpConversion;
2682 
2683 mlir::LogicalResult
2684 matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor,
2685 mlir::ConversionPatternRewriter &rewriter) const override {
2686 // given: -(x + iy)
2687 // result: -x - iy
2688 auto *ctxt = neg.getContext();
2689 auto eleTy = convertType(getComplexEleTy(neg.getType()));
2690 auto ty = convertType(neg.getType());
2691 auto loc = neg.getLoc();
2692 mlir::Value o0 = adaptor.getOperands()[0];
2693 auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
2694 auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
2695 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0);
2696 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1);
2697 auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp);
2698 auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip);
2699 auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0);
2700 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1);
2701 return success();
2702 }
2703 };
2704 
2705 /// Conversion pattern for operations that must be dead. The information in
2706 /// these operations is used by other operations and, at this point, they
2707 /// should not have any remaining uses.
2708 /// These operations are normally dead after the pre-codegen pass.
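/// For example, fir.shape, fir.shift, and fir.slice values are only consumed
/// by embox/rebox/array_coor operations; the pre-codegen rewrite turns those
/// consumers into XEmbox/XRebox/XArrayCoor ops that carry the shape, shift,
/// and slice operands directly, so the defining ops can simply be erased here.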
2709 template <typename FromOp> 2710 struct MustBeDeadConversion : public FIROpConversion<FromOp> { 2711 explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering) 2712 : FIROpConversion<FromOp>(lowering) {} 2713 using OpAdaptor = typename FromOp::Adaptor; 2714 2715 mlir::LogicalResult 2716 matchAndRewrite(FromOp op, OpAdaptor adaptor, 2717 mlir::ConversionPatternRewriter &rewriter) const final { 2718 if (!op->getUses().empty()) 2719 return rewriter.notifyMatchFailure(op, "op must be dead"); 2720 rewriter.eraseOp(op); 2721 return success(); 2722 } 2723 }; 2724 2725 struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> { 2726 using MustBeDeadConversion::MustBeDeadConversion; 2727 }; 2728 2729 struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> { 2730 using MustBeDeadConversion::MustBeDeadConversion; 2731 }; 2732 2733 struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> { 2734 using MustBeDeadConversion::MustBeDeadConversion; 2735 }; 2736 2737 struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> { 2738 using MustBeDeadConversion::MustBeDeadConversion; 2739 }; 2740 2741 /// `fir.is_present` --> 2742 /// ``` 2743 /// %0 = llvm.mlir.constant(0 : i64) 2744 /// %1 = llvm.ptrtoint %0 2745 /// %2 = llvm.icmp "ne" %1, %0 : i64 2746 /// ``` 2747 struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> { 2748 using FIROpConversion::FIROpConversion; 2749 2750 mlir::LogicalResult 2751 matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor, 2752 mlir::ConversionPatternRewriter &rewriter) const override { 2753 mlir::Type idxTy = lowerTy().indexType(); 2754 mlir::Location loc = isPresent.getLoc(); 2755 auto ptr = adaptor.getOperands()[0]; 2756 2757 if (isPresent.val().getType().isa<fir::BoxCharType>()) { 2758 auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>(); 2759 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 2760 2761 mlir::Type ty = structTy.getBody()[0]; 2762 mlir::MLIRContext *ctx = isPresent.getContext(); 2763 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 2764 ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0); 2765 } 2766 mlir::LLVM::ConstantOp c0 = 2767 genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0); 2768 auto addr = rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr); 2769 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 2770 isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0); 2771 2772 return success(); 2773 } 2774 }; 2775 2776 /// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of 2777 /// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element 2778 /// in this struct is a pointer. Its type is determined from `KIND`. The 2nd 2779 /// element is the length of the character buffer (`#n`). 
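/// A rough sketch of the lowering:
///   %0 = fir.emboxchar %buf, %len : (!fir.ref<!fir.char<1,?>>, i64) -> !fir.boxchar<1>
/// becomes an llvm.mlir.undef of the struct followed by two llvm.insertvalue
/// ops, one for the buffer pointer and one for the (integer-cast) length.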
2780 struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> { 2781 using FIROpConversion::FIROpConversion; 2782 2783 mlir::LogicalResult 2784 matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor, 2785 mlir::ConversionPatternRewriter &rewriter) const override { 2786 mlir::ValueRange operands = adaptor.getOperands(); 2787 MLIRContext *ctx = emboxChar.getContext(); 2788 2789 mlir::Value charBuffer = operands[0]; 2790 mlir::Value charBufferLen = operands[1]; 2791 2792 mlir::Location loc = emboxChar.getLoc(); 2793 mlir::Type llvmStructTy = convertType(emboxChar.getType()); 2794 auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy); 2795 2796 mlir::Type lenTy = 2797 llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1]; 2798 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen); 2799 2800 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 2801 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 2802 auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>( 2803 loc, llvmStructTy, llvmStruct, charBuffer, c0); 2804 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 2805 emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1); 2806 2807 return success(); 2808 } 2809 }; 2810 } // namespace 2811 2812 /// Construct an `llvm.extractvalue` instruction. It will return value at 2813 /// element \p x from \p tuple. 2814 static mlir::LLVM::ExtractValueOp 2815 genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty, 2816 mlir::ConversionPatternRewriter &rewriter, 2817 mlir::MLIRContext *ctx, int x) { 2818 auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x)); 2819 auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x]; 2820 return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx); 2821 } 2822 2823 namespace { 2824 /// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the 2825 /// boxchar. 2826 struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> { 2827 using FIROpConversion::FIROpConversion; 2828 2829 mlir::LogicalResult 2830 matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor, 2831 mlir::ConversionPatternRewriter &rewriter) const override { 2832 mlir::Value boxChar = adaptor.getOperands()[0]; 2833 mlir::Location loc = boxChar.getLoc(); 2834 mlir::MLIRContext *ctx = boxChar.getContext(); 2835 mlir::Type returnValTy = boxCharLen.getResult().getType(); 2836 2837 constexpr int boxcharLenIdx = 1; 2838 mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex( 2839 loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx); 2840 mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len); 2841 rewriter.replaceOp(boxCharLen, lenAfterCast); 2842 2843 return success(); 2844 } 2845 }; 2846 2847 /// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for 2848 /// the character buffer and one for the buffer length. 
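/// For example (schematic only):
///   %addr, %len = fir.unboxchar %boxchar
///       : (!fir.boxchar<1>) -> (!fir.ref<!fir.char<1,?>>, i64)
/// becomes roughly:
///   %0 = llvm.extractvalue %boxchar[0] : !llvm.struct<(ptr<i8>, i64)>
///   %1 = llvm.extractvalue %boxchar[1] : !llvm.struct<(ptr<i8>, i64)>
/// followed, if needed, by an integer cast of %1 to the requested length type.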
2849 struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> {
2850   using FIROpConversion::FIROpConversion;
2851
2852   mlir::LogicalResult
2853   matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor,
2854                   mlir::ConversionPatternRewriter &rewriter) const override {
2855     MLIRContext *ctx = unboxchar.getContext();
2856
2857     mlir::Type lenTy = convertType(unboxchar.getType(1));
2858     mlir::Value tuple = adaptor.getOperands()[0];
2859     mlir::Type tupleTy = tuple.getType();
2860
2861     mlir::Location loc = unboxchar.getLoc();
2862     mlir::Value ptrToBuffer =
2863         genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 0);
2864
2865     mlir::LLVM::ExtractValueOp len =
2866         genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 1);
2867     mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len);
2868
2869     rewriter.replaceOp(unboxchar,
2870                        ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast});
2871     return success();
2872   }
2873 };
2874
2875 /// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its
2876 /// components.
2877 /// TODO: Part of supporting Fortran 2003 procedure pointers.
2878 struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> {
2879   using FIROpConversion::FIROpConversion;
2880
2881   mlir::LogicalResult
2882   matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor,
2883                   mlir::ConversionPatternRewriter &rewriter) const override {
2884     TODO(unboxproc.getLoc(), "fir.unboxproc codegen");
2885     return failure();
2886   }
2887 };
2888
2889 /// Convert `fir.field_index`. The conversion depends on whether the size of
2890 /// the record is static or dynamic.
2891 struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> {
2892   using FIROpConversion::FIROpConversion;
2893
2894   // NB: most field references should be resolved by this point
2895   mlir::LogicalResult
2896   matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor,
2897                   mlir::ConversionPatternRewriter &rewriter) const override {
2898     auto recTy = field.on_type().cast<fir::RecordType>();
2899     unsigned index = recTy.getFieldIndex(field.field_id());
2900
2901     if (!fir::hasDynamicSize(recTy)) {
2902       // Derived type has a compile-time constant layout. Return the index of
2903       // the component within the parent type (to be used in GEP).
2904       rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset(
2905                                     field.getLoc(), rewriter, index)});
2906       return success();
2907     }
2908
2909     // Derived type has a dynamic (runtime-dependent) layout. Call the
2910     // compiler-generated function to determine the byte offset of the field
2911     // at runtime. This returns a non-constant.
2912     FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get(
2913         field.getContext(), getOffsetMethodName(recTy, field.field_id()));
2914     NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr);
2915     NamedAttribute fieldAttr = rewriter.getNamedAttr(
2916         "field", mlir::IntegerAttr::get(lowerTy().indexType(), index));
2917     rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
2918         field, lowerTy().offsetType(), adaptor.getOperands(),
2919         llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr});
2920     return success();
2921   }
2922
2923   // Reconstruct the name of the compiler-generated method that computes the
2924   // field offset.
2925   inline static std::string getOffsetMethodName(fir::RecordType recTy,
2926                                                 llvm::StringRef field) {
2927     return recTy.getName().str() + "P." + field.str() + ".offset";
2928   }
2929 };
2930
2931 /// Convert a (memory) reference to a reference to a subobject.
2932 /// The coordinate_of op is a Swiss army knife operation that can be used on 2933 /// (memory) references to records, arrays, complex, etc. as well as boxes. 2934 /// With unboxed arrays, there is the restriction that the array have a static 2935 /// shape in all but the last column. 2936 struct CoordinateOpConversion 2937 : public FIROpAndTypeConversion<fir::CoordinateOp> { 2938 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2939 2940 mlir::LogicalResult 2941 doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor, 2942 mlir::ConversionPatternRewriter &rewriter) const override { 2943 mlir::ValueRange operands = adaptor.getOperands(); 2944 2945 mlir::Location loc = coor.getLoc(); 2946 mlir::Value base = operands[0]; 2947 mlir::Type baseObjectTy = coor.getBaseType(); 2948 mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2949 assert(objectTy && "fir.coordinate_of expects a reference type"); 2950 2951 // Complex type - basically, extract the real or imaginary part 2952 if (fir::isa_complex(objectTy)) { 2953 mlir::LLVM::ConstantOp c0 = 2954 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2955 SmallVector<mlir::Value> offs = {c0, operands[1]}; 2956 mlir::Value gep = genGEP(loc, ty, rewriter, base, offs); 2957 rewriter.replaceOp(coor, gep); 2958 return success(); 2959 } 2960 2961 // Boxed type - get the base pointer from the box 2962 if (baseObjectTy.dyn_cast<fir::BoxType>()) 2963 return doRewriteBox(coor, ty, operands, loc, rewriter); 2964 2965 // Reference or pointer type 2966 if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType>()) 2967 return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter); 2968 2969 return rewriter.notifyMatchFailure( 2970 coor, "fir.coordinate_of base operand has unsupported type"); 2971 } 2972 2973 unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) const { 2974 return fir::hasDynamicSize(ty) 2975 ? op.getDefiningOp() 2976 ->getAttrOfType<mlir::IntegerAttr>("field") 2977 .getInt() 2978 : getIntValue(op); 2979 } 2980 2981 int64_t getIntValue(mlir::Value val) const { 2982 assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value"); 2983 mlir::Operation *defop = val.getDefiningOp(); 2984 2985 if (auto constOp = dyn_cast<mlir::arith::ConstantIntOp>(defop)) 2986 return constOp.value(); 2987 if (auto llConstOp = dyn_cast<mlir::LLVM::ConstantOp>(defop)) 2988 if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>()) 2989 return attr.getValue().getSExtValue(); 2990 fir::emitFatalError(val.getLoc(), "must be a constant"); 2991 } 2992 2993 bool hasSubDimensions(mlir::Type type) const { 2994 return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>(); 2995 } 2996 2997 /// Check whether this form of `!fir.coordinate_of` is supported. These 2998 /// additional checks are required, because we are not yet able to convert 2999 /// all valid forms of `!fir.coordinate_of`. 3000 /// TODO: Either implement the unsupported cases or extend the verifier 3001 /// in FIROps.cpp instead. 
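/// For example (illustrative): a coordinate chain that only steps through
/// arrays, records, and tuples, such as
///   %p = fir.coordinate_of %ref, %i, %j
///       : (!fir.ref<!fir.array<10x20xf32>>, index, index) -> !fir.ref<f32>
/// is accepted, whereas a coordinate applied to a non-aggregate element type
/// is only accepted when it is the sole coordinate operand.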
3002   bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) const {
3003     const std::size_t numOfCoors = coors.size();
3004     std::size_t i = 0;
3005     bool subEle = false;
3006     bool ptrEle = false;
3007     for (; i < numOfCoors; ++i) {
3008       mlir::Value nxtOpnd = coors[i];
3009       if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
3010         subEle = true;
3011         i += arrTy.getDimension() - 1;
3012         type = arrTy.getEleTy();
3013       } else if (auto recTy = type.dyn_cast<fir::RecordType>()) {
3014         subEle = true;
3015         type = recTy.getType(getFieldNumber(recTy, nxtOpnd));
3016       } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) {
3017         subEle = true;
3018         type = tupTy.getType(getIntValue(nxtOpnd));
3019       } else {
3020         ptrEle = true;
3021       }
3022     }
3023     if (ptrEle)
3024       return (!subEle) && (numOfCoors == 1);
3025     return subEle && (i >= numOfCoors);
3026   }
3027
3028   /// Walk the abstract memory layout and determine if the path traverses any
3029   /// array types with unknown shape. Return true iff all the array types have
3030   /// a constant shape along the path.
3031   bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) const {
3032     const std::size_t sz = coors.size();
3033     std::size_t i = 0;
3034     for (; i < sz; ++i) {
3035       mlir::Value nxtOpnd = coors[i];
3036       if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
3037         if (fir::sequenceWithNonConstantShape(arrTy))
3038           return false;
3039         i += arrTy.getDimension() - 1;
3040         type = arrTy.getEleTy();
3041       } else if (auto strTy = type.dyn_cast<fir::RecordType>()) {
3042         type = strTy.getType(getFieldNumber(strTy, nxtOpnd));
3043       } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) {
3044         type = strTy.getType(getIntValue(nxtOpnd));
3045       } else {
3046         return true;
3047       }
3048     }
3049     return true;
3050   }
3051
3052 private:
3053   mlir::LogicalResult
3054   doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands,
3055                mlir::Location loc,
3056                mlir::ConversionPatternRewriter &rewriter) const {
3057     mlir::Type boxObjTy = coor.getBaseType();
3058     assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`");
3059
3060     mlir::Value boxBaseAddr = operands[0];
3061
3062     // 1. SPECIAL CASE (uses `fir.len_param_index`):
3063     //   %box = ... : !fir.box<!fir.type<derived{len1:i32}>>
3064     //   %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}>
3065     //   %addr = coordinate_of %box, %lenp
3066     if (coor.getNumOperands() == 2) {
3067       mlir::Operation *coordinateDef = (*coor.coor().begin()).getDefiningOp();
3068       if (isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef)) {
3069         TODO(loc,
3070              "fir.coordinate_of - fir.len_param_index is not supported yet");
3071       }
3072     }
3073
3074     // 2. GENERAL CASE:
3075     // 2.1. (`fir.array`)
3076     //   %box = ... : !fir.box<!fir.array<?xU>>
3077     //   %idx = ... : index
3078     //   %resultAddr = coordinate_of %box, %idx : !fir.ref<U>
3079     // 2.2 (`fir.derived`)
3080     //   %box = ... : !fir.box<!fir.type<derived_type{field_1:i32}>>
3081     //   %idx = ... : i32
3082     //   %resultAddr = coordinate_of %box, %idx : !fir.ref<i32>
3083     // 2.3 (`fir.derived` inside `fir.array`)
3084     //   %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32, field_2:f32}>>>
3085     //   %idx1 = ... : index
3086     //   %idx2 = ... : i32
3087     //   %resultAddr = coordinate_of %box, %idx1, %idx2 : !fir.ref<f32>
3088     // 2.4. TODO: Either document or disable any other case that the following
3089     //  implementation might convert.
3090 mlir::LLVM::ConstantOp c0 = 3091 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 3092 mlir::Value resultAddr = 3093 loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()), 3094 boxBaseAddr, rewriter); 3095 auto currentObjTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy); 3096 mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext()); 3097 3098 for (unsigned i = 1, last = operands.size(); i < last; ++i) { 3099 if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) { 3100 if (i != 1) 3101 TODO(loc, "fir.array nested inside other array and/or derived type"); 3102 // Applies byte strides from the box. Ignore lower bound from box 3103 // since fir.coordinate_of indexes are zero based. Lowering takes care 3104 // of lower bound aspects. This both accounts for dynamically sized 3105 // types and non contiguous arrays. 3106 auto idxTy = lowerTy().indexType(); 3107 mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0); 3108 for (unsigned index = i, lastIndex = i + arrTy.getDimension(); 3109 index < lastIndex; ++index) { 3110 mlir::Value stride = 3111 loadStrideFromBox(loc, operands[0], index - i, rewriter); 3112 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, 3113 operands[index], stride); 3114 off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off); 3115 } 3116 auto voidPtrBase = 3117 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr); 3118 SmallVector<mlir::Value> args{off}; 3119 resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, 3120 voidPtrBase, args); 3121 i += arrTy.getDimension() - 1; 3122 currentObjTy = arrTy.getEleTy(); 3123 } else if (auto recTy = currentObjTy.dyn_cast<fir::RecordType>()) { 3124 auto recRefTy = 3125 mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy)); 3126 mlir::Value nxtOpnd = operands[i]; 3127 auto memObj = 3128 rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr); 3129 llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd}; 3130 currentObjTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 3131 auto llvmCurrentObjTy = lowerTy().convertType(currentObjTy); 3132 auto gep = rewriter.create<mlir::LLVM::GEPOp>( 3133 loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj, 3134 args); 3135 resultAddr = 3136 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep); 3137 } else { 3138 fir::emitFatalError(loc, "unexpected type in coordinate_of"); 3139 } 3140 } 3141 3142 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr); 3143 return success(); 3144 } 3145 3146 mlir::LogicalResult 3147 doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty, 3148 mlir::ValueRange operands, mlir::Location loc, 3149 mlir::ConversionPatternRewriter &rewriter) const { 3150 mlir::Type baseObjectTy = coor.getBaseType(); 3151 3152 mlir::Type currentObjTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 3153 bool hasSubdimension = hasSubDimensions(currentObjTy); 3154 bool columnIsDeferred = !hasSubdimension; 3155 3156 if (!supportedCoordinate(currentObjTy, operands.drop_front(1))) { 3157 TODO(loc, "unsupported combination of coordinate operands"); 3158 } 3159 3160 const bool hasKnownShape = 3161 arraysHaveKnownShape(currentObjTy, operands.drop_front(1)); 3162 3163 // If only the column is `?`, then we can simply place the column value in 3164 // the 0-th GEP position. 
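    // For example (illustrative): with a base of type
    // !fir.ref<!fir.array<10x20x?xi32>> only the last (slowest varying)
    // extent is unknown. All strides needed for addressing are still
    // compile-time constants, so the index for that deferred dimension can be
    // placed directly in the leading GEP position instead of a constant 0.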
3165 if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) { 3166 if (!hasKnownShape) { 3167 const unsigned sz = arrTy.getDimension(); 3168 if (arraysHaveKnownShape(arrTy.getEleTy(), 3169 operands.drop_front(1 + sz))) { 3170 llvm::ArrayRef<int64_t> shape = arrTy.getShape(); 3171 bool allConst = true; 3172 for (unsigned i = 0; i < sz - 1; ++i) { 3173 if (shape[i] < 0) { 3174 allConst = false; 3175 break; 3176 } 3177 } 3178 if (allConst) 3179 columnIsDeferred = true; 3180 } 3181 } 3182 } 3183 3184 if (fir::hasDynamicSize(fir::unwrapSequenceType(currentObjTy))) { 3185 mlir::emitError( 3186 loc, "fir.coordinate_of with a dynamic element size is unsupported"); 3187 return failure(); 3188 } 3189 3190 if (hasKnownShape || columnIsDeferred) { 3191 SmallVector<mlir::Value> offs; 3192 if (hasKnownShape && hasSubdimension) { 3193 mlir::LLVM::ConstantOp c0 = 3194 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 3195 offs.push_back(c0); 3196 } 3197 const std::size_t sz = operands.size(); 3198 Optional<int> dims; 3199 SmallVector<mlir::Value> arrIdx; 3200 for (std::size_t i = 1; i < sz; ++i) { 3201 mlir::Value nxtOpnd = operands[i]; 3202 3203 if (!currentObjTy) { 3204 mlir::emitError(loc, "invalid coordinate/check failed"); 3205 return failure(); 3206 } 3207 3208 // check if the i-th coordinate relates to an array 3209 if (dims.hasValue()) { 3210 arrIdx.push_back(nxtOpnd); 3211 int dimsLeft = *dims; 3212 if (dimsLeft > 1) { 3213 dims = dimsLeft - 1; 3214 continue; 3215 } 3216 currentObjTy = currentObjTy.cast<fir::SequenceType>().getEleTy(); 3217 // append array range in reverse (FIR arrays are column-major) 3218 offs.append(arrIdx.rbegin(), arrIdx.rend()); 3219 arrIdx.clear(); 3220 dims.reset(); 3221 continue; 3222 } 3223 if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) { 3224 int d = arrTy.getDimension() - 1; 3225 if (d > 0) { 3226 dims = d; 3227 arrIdx.push_back(nxtOpnd); 3228 continue; 3229 } 3230 currentObjTy = currentObjTy.cast<fir::SequenceType>().getEleTy(); 3231 offs.push_back(nxtOpnd); 3232 continue; 3233 } 3234 3235 // check if the i-th coordinate relates to a field 3236 if (auto recTy = currentObjTy.dyn_cast<fir::RecordType>()) 3237 currentObjTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 3238 else if (auto tupTy = currentObjTy.dyn_cast<mlir::TupleType>()) 3239 currentObjTy = tupTy.getType(getIntValue(nxtOpnd)); 3240 else 3241 currentObjTy = nullptr; 3242 3243 offs.push_back(nxtOpnd); 3244 } 3245 if (dims.hasValue()) 3246 offs.append(arrIdx.rbegin(), arrIdx.rend()); 3247 mlir::Value base = operands[0]; 3248 mlir::Value retval = genGEP(loc, ty, rewriter, base, offs); 3249 rewriter.replaceOp(coor, retval); 3250 return success(); 3251 } 3252 3253 mlir::emitError(loc, "fir.coordinate_of base operand has unsupported type"); 3254 return failure(); 3255 } 3256 }; 3257 3258 } // namespace 3259 3260 namespace { 3261 /// Convert FIR dialect to LLVM dialect 3262 /// 3263 /// This pass lowers all FIR dialect operations to LLVM IR dialect. An 3264 /// MLIR pass is used to lower residual Std dialect to LLVM IR dialect. 3265 /// 3266 /// This pass is not complete yet. We are upstreaming it in small patches. 
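/// For example (illustrative):
///   func @f(%a : !fir.ref<i32>) -> i32 {
///     %0 = fir.load %a : !fir.ref<i32>
///     return %0 : i32
///   }
/// becomes, after this pass, a function containing only LLVM dialect
/// operations (llvm.load, llvm.return, ...), ready for translation to LLVM IR
/// proper.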
3267 class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> { 3268 public: 3269 mlir::ModuleOp getModule() { return getOperation(); } 3270 3271 void runOnOperation() override final { 3272 auto mod = getModule(); 3273 if (!forcedTargetTriple.empty()) { 3274 fir::setTargetTriple(mod, forcedTargetTriple); 3275 } 3276 3277 auto *context = getModule().getContext(); 3278 fir::LLVMTypeConverter typeConverter{getModule()}; 3279 mlir::RewritePatternSet pattern(context); 3280 pattern.insert< 3281 AbsentOpConversion, AddcOpConversion, AddrOfOpConversion, 3282 AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion, 3283 BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion, 3284 BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion, 3285 BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeDescOpConversion, 3286 CallOpConversion, CmpcOpConversion, ConstcOpConversion, 3287 ConvertOpConversion, CoordinateOpConversion, DispatchOpConversion, 3288 DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion, 3289 EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion, 3290 ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion, 3291 FreeMemOpConversion, HasValueOpConversion, GenTypeDescOpConversion, 3292 GlobalLenOpConversion, GlobalOpConversion, InsertOnRangeOpConversion, 3293 InsertValueOpConversion, IsPresentOpConversion, 3294 LenParamIndexOpConversion, LoadOpConversion, NegcOpConversion, 3295 NoReassocOpConversion, MulcOpConversion, SelectCaseOpConversion, 3296 SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion, 3297 ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion, 3298 SliceOpConversion, StoreOpConversion, StringLitOpConversion, 3299 SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion, 3300 UndefOpConversion, UnreachableOpConversion, XArrayCoorOpConversion, 3301 XEmboxOpConversion, XReboxOpConversion, ZeroOpConversion>( 3302 typeConverter); 3303 mlir::populateStdToLLVMConversionPatterns(typeConverter, pattern); 3304 mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter, 3305 pattern); 3306 mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter, 3307 pattern); 3308 mlir::ConversionTarget target{*context}; 3309 target.addLegalDialect<mlir::LLVM::LLVMDialect>(); 3310 3311 // required NOPs for applying a full conversion 3312 target.addLegalOp<mlir::ModuleOp>(); 3313 3314 // apply the patterns 3315 if (mlir::failed(mlir::applyFullConversion(getModule(), target, 3316 std::move(pattern)))) { 3317 signalPassFailure(); 3318 } 3319 } 3320 }; 3321 3322 /// Lower from LLVM IR dialect to proper LLVM-IR and dump the module 3323 struct LLVMIRLoweringPass 3324 : public mlir::PassWrapper<LLVMIRLoweringPass, 3325 mlir::OperationPass<mlir::ModuleOp>> { 3326 using Printer = fir::LLVMIRLoweringPrinter; 3327 LLVMIRLoweringPass(raw_ostream &output, Printer p) 3328 : output{output}, printer{p} {} 3329 3330 mlir::ModuleOp getModule() { return getOperation(); } 3331 3332 void runOnOperation() override final { 3333 auto *ctx = getModule().getContext(); 3334 auto optName = getModule().getName(); 3335 llvm::LLVMContext llvmCtx; 3336 if (auto llvmModule = mlir::translateModuleToLLVMIR( 3337 getModule(), llvmCtx, optName ? 
*optName : "FIRModule")) { 3338 printer(*llvmModule, output); 3339 return; 3340 } 3341 3342 mlir::emitError(mlir::UnknownLoc::get(ctx), "could not emit LLVM-IR\n"); 3343 signalPassFailure(); 3344 } 3345 3346 private: 3347 raw_ostream &output; 3348 Printer printer; 3349 }; 3350 3351 } // namespace 3352 3353 std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() { 3354 return std::make_unique<FIRToLLVMLowering>(); 3355 } 3356 3357 std::unique_ptr<mlir::Pass> 3358 fir::createLLVMDialectToLLVMPass(raw_ostream &output, 3359 fir::LLVMIRLoweringPrinter printer) { 3360 return std::make_unique<LLVMIRLoweringPass>(output, printer); 3361 } 3362
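// A minimal usage sketch (illustrative only; the pass-manager setup and the
// printer callback below are assumptions of a typical driver, not part of
// this file):
//
//   mlir::PassManager pm(module.getContext());
//   pm.addPass(fir::createFIRToLLVMPass());
//   fir::LLVMIRLoweringPrinter printer =
//       [](llvm::Module &m, llvm::raw_ostream &os) { m.print(os, nullptr); };
//   pm.addPass(fir::createLLVMDialectToLLVMPass(llvm::outs(), printer));
//   if (mlir::failed(pm.run(module)))
//     ; // handle failure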