//===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
//
//===----------------------------------------------------------------------===//

#include "flang/Optimizer/CodeGen/CodeGen.h"
#include "CGOps.h"
#include "PassDetail.h"
#include "flang/ISO_Fortran_binding.h"
#include "flang/Optimizer/Dialect/FIRAttr.h"
#include "flang/Optimizer/Dialect/FIROps.h"
#include "flang/Optimizer/Support/TypeCode.h"
#include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h"
#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Target/LLVMIR/ModuleTranslation.h"
#include "llvm/ADT/ArrayRef.h"

#define DEBUG_TYPE "flang-codegen"

// fir::LLVMTypeConverter for converting to LLVM IR dialect types.
#include "TypeConverter.h"

// TODO: This should really be recovered from the specified target.
static constexpr unsigned defaultAlign = 8;

/// `fir.box` attribute values as defined for CFI_attribute_t in
/// flang/ISO_Fortran_binding.h.
static constexpr unsigned kAttrPointer = CFI_attribute_pointer;
static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable;

static inline mlir::Type getVoidPtrType(mlir::MLIRContext *context) {
  return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8));
}

static mlir::LLVM::ConstantOp
genConstantIndex(mlir::Location loc, mlir::Type ity,
                 mlir::ConversionPatternRewriter &rewriter,
                 std::int64_t offset) {
  auto cattr = rewriter.getI64IntegerAttr(offset);
  return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
}

static Block *createBlock(mlir::ConversionPatternRewriter &rewriter,
                          mlir::Block *insertBefore) {
  assert(insertBefore && "expected valid insertion block");
  return rewriter.createBlock(insertBefore->getParent(),
                              mlir::Region::iterator(insertBefore));
}

namespace {
/// FIR conversion pattern template
template <typename FromOp>
class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
public:
  explicit FIROpConversion(fir::LLVMTypeConverter &lowering)
      : mlir::ConvertOpToLLVMPattern<FromOp>(lowering) {}

protected:
  mlir::Type convertType(mlir::Type ty) const {
    return lowerTy().convertType(ty);
  }
  mlir::Type voidPtrTy() const { return getVoidPtrType(); }

  mlir::Type getVoidPtrType() const {
    return mlir::LLVM::LLVMPointerType::get(
        mlir::IntegerType::get(&lowerTy().getContext(), 8));
  }

  mlir::LLVM::ConstantOp
  genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
                 int value) const {
    mlir::Type i32Ty = rewriter.getI32Type();
    mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr);
  }

  mlir::LLVM::ConstantOp
  genConstantOffset(mlir::Location loc,
                    mlir::ConversionPatternRewriter &rewriter,
                    int offset) const {
    mlir::Type ity = lowerTy().offsetType();
    mlir::IntegerAttr cattr = rewriter.getI32IntegerAttr(offset);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
  }

  /// Construct code sequence to extract the specific value from a `fir.box`.
  mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box,
                              mlir::Type resultTy,
                              mlir::ConversionPatternRewriter &rewriter,
                              unsigned boxValue) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cValuePos =
        genConstantOffset(loc, rewriter, boxValue);
    auto pty = mlir::LLVM::LLVMPointerType::get(resultTy);
    auto p = rewriter.create<mlir::LLVM::GEPOp>(
        loc, pty, box, mlir::ValueRange{c0, cValuePos});
    return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p);
  }

  /// Method to construct code sequence to get the triple for dimension `dim`
  /// from a box.
  SmallVector<mlir::Value, 3>
  getDimsFromBox(mlir::Location loc, ArrayRef<mlir::Type> retTys,
                 mlir::Value box, mlir::Value dim,
                 mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cDims =
        genConstantOffset(loc, rewriter, kDimsPosInBox);
    mlir::LLVM::LoadOp l0 =
        loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter);
    mlir::LLVM::LoadOp l1 =
        loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter);
    mlir::LLVM::LoadOp l2 =
        loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter);
    return {l0.getResult(), l1.getResult(), l2.getResult()};
  }

  mlir::LLVM::LoadOp
  loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0,
                 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off,
                 mlir::Type ty,
                 mlir::ConversionPatternRewriter &rewriter) const {
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  mlir::Value
  loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim,
                    mlir::ConversionPatternRewriter &rewriter) const {
    auto idxTy = lowerTy().indexType();
    auto c0 = genConstantOffset(loc, rewriter, 0);
    auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox);
    auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim);
    return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy,
                          rewriter);
  }

  /// Read base address from a fir.box. Returned address has type ty.
  mlir::Value
  loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                      mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cAddr =
        genConstantOffset(loc, rewriter, kAddrPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  mlir::Value
  loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                         mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cElemLen =
        genConstantOffset(loc, rewriter, kElemLenPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  // Load the attribute from the \p box and perform a check against
  // \p maskValue. The final comparison is implemented as
  // `(attribute & maskValue) != 0`.
  mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box,
                                   mlir::ConversionPatternRewriter &rewriter,
                                   unsigned maskValue) const {
    mlir::Type attrTy = rewriter.getI32Type();
    mlir::Value attribute =
        getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox);
    mlir::LLVM::ConstantOp attrMask =
        genConstantOffset(loc, rewriter, maskValue);
    auto maskRes =
        rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask);
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    return rewriter.create<mlir::LLVM::ICmpOp>(
        loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0);
  }

  // Get the element type given an LLVM type that is of the form
  // [llvm.ptr](array|struct|vector)+ and the provided indexes.
  static mlir::Type getBoxEleTy(mlir::Type type,
                                llvm::ArrayRef<unsigned> indexes) {
    if (auto t = type.dyn_cast<mlir::LLVM::LLVMPointerType>())
      type = t.getElementType();
    for (auto i : indexes) {
      if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) {
        assert(!t.isOpaque() && i < t.getBody().size());
        type = t.getBody()[i];
      } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
        type = t.getElementType();
      } else if (auto t = type.dyn_cast<mlir::VectorType>()) {
        type = t.getElementType();
      } else {
        fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()),
                            "request for invalid box element type");
      }
    }
    return type;
  }

  // Return LLVM type of the base address given the LLVM type
  // of the related descriptor (lowered fir.box type).
  static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) {
    return getBoxEleTy(type, {kAddrPosInBox});
  }

  template <typename... ARGS>
  mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty,
                           mlir::ConversionPatternRewriter &rewriter,
                           mlir::Value base, ARGS... args) const {
    SmallVector<mlir::Value> cv{args...};
    return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv);
  }

  /// Perform an extension or truncation as needed on an integer value.
  /// Lowering to the specific target may involve some sign-extending or
  /// truncation of values, particularly to fit them from abstract box types to
  /// the appropriate reified structures.
  mlir::Value integerCast(mlir::Location loc,
                          mlir::ConversionPatternRewriter &rewriter,
                          mlir::Type ty, mlir::Value val) const {
    auto valTy = val.getType();
    // If the value was not yet lowered, lower its type so that it can
    // be used in getPrimitiveTypeSizeInBits.
    if (!valTy.isa<mlir::IntegerType>())
      valTy = convertType(valTy);
    auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
    auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy);
    if (toSize < fromSize)
      return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
    if (toSize > fromSize)
      return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
    return val;
  }

  fir::LLVMTypeConverter &lowerTy() const {
    return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter());
  }
};

/// FIR conversion pattern template
template <typename FromOp>
class FIROpAndTypeConversion : public FIROpConversion<FromOp> {
public:
  using FIROpConversion<FromOp>::FIROpConversion;
  using OpAdaptor = typename FromOp::Adaptor;

  mlir::LogicalResult
  matchAndRewrite(FromOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const final {
    mlir::Type ty = this->convertType(op.getType());
    return doRewrite(op, ty, adaptor, rewriter);
  }

  virtual mlir::LogicalResult
  doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const = 0;
};

/// Create value signaling an absent optional argument in a call, e.g.
/// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>`
struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AbsentOp absent, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type ty = convertType(absent.getType());
    mlir::Location loc = absent.getLoc();

    if (absent.getType().isa<fir::BoxCharType>()) {
      auto structTy = ty.cast<mlir::LLVM::LLVMStructType>();
      assert(!structTy.isOpaque() && !structTy.getBody().empty());
      auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
      auto nullField =
          rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]);
      mlir::MLIRContext *ctx = absent.getContext();
      auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
      rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
          absent, ty, undefStruct, nullField, c0);
    } else {
      rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty);
    }
    return success();
  }
};

// Lower `fir.address_of` operation to `llvm.address_of` operation.
struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(addr.getType());
    rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>(
        addr, ty, addr.symbol().getRootReference().getValue());
    return success();
  }
};
} // namespace

/// Lookup the function to compute the memory size of this parametric derived
/// type. The size of the object may depend on the LEN type parameters of the
/// derived type.
static mlir::LLVM::LLVMFuncOp
getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op,
                          mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  std::string name = recTy.getName().str() + "P.mem.size";
  return module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name);
}

namespace {
/// convert to LLVM IR dialect `alloca`
struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    auto loc = alloc.getLoc();
    mlir::Type ity = lowerTy().indexType();
    unsigned i = 0;
    mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult();
    mlir::Type ty = convertType(alloc.getType());
    mlir::Type resultTy = ty;
    if (alloc.hasLenParams()) {
      unsigned end = alloc.numLenParams();
      llvm::SmallVector<mlir::Value> lenParams;
      for (; i < end; ++i)
        lenParams.push_back(operands[i]);
      mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType());
      if (auto chrTy = scalarType.dyn_cast<fir::CharacterType>()) {
        fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen(
            chrTy.getContext(), chrTy.getFKind());
        ty = mlir::LLVM::LLVMPointerType::get(convertType(rawCharTy));
        assert(end == 1);
        size = integerCast(loc, rewriter, ity, lenParams[0]);
      } else if (auto recTy = scalarType.dyn_cast<fir::RecordType>()) {
        mlir::LLVM::LLVMFuncOp memSizeFn =
            getDependentTypeMemSizeFn(recTy, alloc, rewriter);
        if (!memSizeFn)
          emitError(loc, "did not find allocation function");
        mlir::NamedAttribute attr = rewriter.getNamedAttr(
            "callee", mlir::SymbolRefAttr::get(memSizeFn));
        auto call = rewriter.create<mlir::LLVM::CallOp>(
            loc, ity, lenParams, llvm::ArrayRef<mlir::NamedAttribute>{attr});
        size = call.getResult(0);
        ty = mlir::LLVM::LLVMPointerType::get(
            mlir::IntegerType::get(alloc.getContext(), 8));
      } else {
        return emitError(loc, "unexpected type ")
               << scalarType << " with type parameters";
      }
    }
    if (alloc.hasShapeOperands()) {
      mlir::Type allocEleTy = fir::unwrapRefType(alloc.getType());
      // Scale the size by constant factors encoded in the array type.
      // We only do this for arrays that don't have a constant interior, since
      // those are the only ones that get decayed to a pointer to the element
      // type.
      if (auto seqTy = allocEleTy.dyn_cast<fir::SequenceType>()) {
        if (!seqTy.hasConstantInterior()) {
          fir::SequenceType::Extent constSize = 1;
          for (auto extent : seqTy.getShape())
            if (extent != fir::SequenceType::getUnknownExtent())
              constSize *= extent;
          mlir::Value constVal{
              genConstantIndex(loc, ity, rewriter, constSize).getResult()};
          size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, constVal);
        }
      }
      unsigned end = operands.size();
      for (; i < end; ++i)
        size = rewriter.create<mlir::LLVM::MulOp>(
            loc, ity, size, integerCast(loc, rewriter, ity, operands[i]));
    }
    if (ty == resultTy) {
      // Do not emit the bitcast if ty and resultTy are the same.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AllocaOp>(alloc, ty, size,
                                                        alloc->getAttrs());
    } else {
      auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, ty, size,
                                                      alloc->getAttrs());
      rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(alloc, resultTy, al);
    }
    return success();
  }
};

/// Lower `fir.box_addr` to the sequence of operations to extract the first
/// element of the box.
struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxaddr.getLoc();
    mlir::Type ty = convertType(boxaddr.getType());
    if (auto argty = boxaddr.val().getType().dyn_cast<fir::BoxType>()) {
      rewriter.replaceOp(boxaddr, loadBaseAddrFromBox(loc, ty, a, rewriter));
    } else {
      auto c0attr = rewriter.getI32IntegerAttr(0);
      auto c0 = mlir::ArrayAttr::get(boxaddr.getContext(), c0attr);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, ty, a,
                                                              c0);
    }
    return success();
  }
};

/// Lower `fir.box_dims` to a sequence of operations to extract the requested
/// dimension information from the boxed value.
/// Results in a triple set of GEPs and loads.
struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    SmallVector<mlir::Type, 3> resultTypes = {
        convertType(boxdims.getResult(0).getType()),
        convertType(boxdims.getResult(1).getType()),
        convertType(boxdims.getResult(2).getType()),
    };
    auto results =
        getDimsFromBox(boxdims.getLoc(), resultTypes, adaptor.getOperands()[0],
                       adaptor.getOperands()[1], rewriter);
    rewriter.replaceOp(boxdims, results);
    return success();
  }
};

/// Lower `fir.box_elesize` to a sequence of operations to extract the size of
/// an element in the boxed value.
struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxelesz.getLoc();
    auto ty = convertType(boxelesz.getType());
    auto elemSize = getValueFromBox(loc, a, ty, rewriter, kElemLenPosInBox);
    rewriter.replaceOp(boxelesz, elemSize);
    return success();
  }
};

/// Lower `fir.box_isalloc` to a sequence of operations to determine if the
/// boxed value was from an ALLOCATABLE entity.
struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisalloc.getLoc();
    mlir::Value check =
        genBoxAttributeCheck(loc, box, rewriter, kAttrAllocatable);
    rewriter.replaceOp(boxisalloc, check);
    return success();
  }
};

/// Lower `fir.box_isarray` to a sequence of operations to determine if the
/// boxed value is an array.
struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxisarray.getLoc();
    auto rank =
        getValueFromBox(loc, a, rewriter.getI32Type(), rewriter, kRankPosInBox);
    auto c0 = genConstantOffset(loc, rewriter, 0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
        boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0);
    return success();
  }
};

/// Lower `fir.box_isptr` to a sequence of operations to determine if the
/// boxed value was from a POINTER entity.
struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisptr.getLoc();
    mlir::Value check = genBoxAttributeCheck(loc, box, rewriter, kAttrPointer);
    rewriter.replaceOp(boxisptr, check);
    return success();
  }
};

/// Lower `fir.box_rank` to the sequence of operations to extract the rank from
/// the box.
struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxrank.getLoc();
    mlir::Type ty = convertType(boxrank.getType());
    auto result = getValueFromBox(loc, a, ty, rewriter, kRankPosInBox);
    rewriter.replaceOp(boxrank, result);
    return success();
  }
};

/// Lower `fir.string_lit` to LLVM IR dialect operation.
struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(constop.getType());
    auto attr = constop.getValue();
    if (attr.isa<mlir::StringAttr>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr);
      return success();
    }

    auto arr = attr.cast<mlir::ArrayAttr>();
    auto charTy = constop.getType().cast<fir::CharacterType>();
    unsigned bits = lowerTy().characterBitsize(charTy);
    mlir::Type intTy = rewriter.getIntegerType(bits);
    auto attrs = llvm::map_range(
        arr.getValue(), [intTy, bits](mlir::Attribute attr) -> Attribute {
          return mlir::IntegerAttr::get(
              intTy,
              attr.cast<mlir::IntegerAttr>().getValue().sextOrTrunc(bits));
        });
    mlir::Type vecType = mlir::VectorType::get(arr.size(), intTy);
    auto denseAttr = mlir::DenseElementsAttr::get(
        vecType.cast<mlir::ShapedType>(), llvm::to_vector<8>(attrs));
    rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>(constop, ty,
                                                         denseAttr);
    return success();
  }
};

/// Lower `fir.boxproc_host` operation. Extracts the host pointer from the
/// boxproc.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(boxprochost.getLoc(), "fir.boxproc_host codegen");
    return failure();
  }
};

/// Lower `fir.box_tdesc` to the sequence of operations to extract the type
/// descriptor from the box.
struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxtypedesc.getLoc();
    mlir::Type typeTy =
        fir::getDescFieldTypeModel<kTypePosInBox>()(boxtypedesc.getContext());
    auto result = getValueFromBox(loc, box, typeTy, rewriter, kTypePosInBox);
    auto typePtrTy = mlir::LLVM::LLVMPointerType::get(typeTy);
    rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(boxtypedesc, typePtrTy,
                                                        result);
    return success();
  }
};

// `fir.call` -> `llvm.call`
struct CallOpConversion : public FIROpConversion<fir::CallOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CallOp call, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    SmallVector<mlir::Type> resultTys;
    for (auto r : call.getResults())
      resultTys.push_back(convertType(r.getType()));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        call, resultTys, adaptor.getOperands(), call->getAttrs());
    return success();
  }
};
} // namespace

static mlir::Type getComplexEleTy(mlir::Type complex) {
  if (auto cc = complex.dyn_cast<mlir::ComplexType>())
    return cc.getElementType();
  return complex.cast<fir::ComplexType>().getElementType();
}

namespace {
/// Compare complex values
///
/// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une).
///
/// For completeness, all other comparisons are done on the real component
/// only.
struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    mlir::MLIRContext *ctxt = cmp.getContext();
    mlir::Type eleTy = convertType(getComplexEleTy(cmp.lhs().getType()));
    mlir::Type resTy = convertType(cmp.getType());
    mlir::Location loc = cmp.getLoc();
    auto pos0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
    SmallVector<mlir::Value, 2> rp{rewriter.create<mlir::LLVM::ExtractValueOp>(
                                       loc, eleTy, operands[0], pos0),
                                   rewriter.create<mlir::LLVM::ExtractValueOp>(
                                       loc, eleTy, operands[1], pos0)};
    auto rcp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, rp, cmp->getAttrs());
    auto pos1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
    SmallVector<mlir::Value, 2> ip{rewriter.create<mlir::LLVM::ExtractValueOp>(
                                       loc, eleTy, operands[0], pos1),
                                   rewriter.create<mlir::LLVM::ExtractValueOp>(
                                       loc, eleTy, operands[1], pos1)};
    auto icp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, ip, cmp->getAttrs());
    SmallVector<mlir::Value, 2> cp{rcp, icp};
    switch (cmp.getPredicate()) {
    case mlir::arith::CmpFPredicate::OEQ: // .EQ.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp);
      break;
    case mlir::arith::CmpFPredicate::UNE: // .NE.
      rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp);
      break;
    default:
      rewriter.replaceOp(cmp, rcp.getResult());
      break;
    }
    return success();
  }
};

/// Lower complex constants
struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::ConstcOp conc, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Location loc = conc.getLoc();
    mlir::MLIRContext *ctx = conc.getContext();
    mlir::Type ty = convertType(conc.getType());
    mlir::Type ety = convertType(getComplexEleTy(conc.getType()));
    auto realFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getReal()));
    auto realPart =
        rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, realFloatAttr);
    auto imFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getImaginary()));
    auto imPart =
        rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, imFloatAttr);
    auto realIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto imIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    auto undef = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto setReal = rewriter.create<mlir::LLVM::InsertValueOp>(
        loc, ty, undef, realPart, realIndex);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(conc, ty, setReal,
                                                           imPart, imIndex);
    return success();
  }

  inline APFloat getValue(mlir::Attribute attr) const {
    return attr.cast<fir::RealAttr>().getValue();
  }
};

/// convert value of from-type to value of to-type
struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> {
  using FIROpConversion::FIROpConversion;

  static bool isFloatingPointTy(mlir::Type ty) {
    return ty.isa<mlir::FloatType>();
  }

  mlir::LogicalResult
  matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto fromTy = convertType(convert.value().getType());
    auto toTy = convertType(convert.res().getType());
    mlir::Value op0 = adaptor.getOperands()[0];
    if (fromTy == toTy) {
      rewriter.replaceOp(convert, op0);
      return success();
    }
    auto loc = convert.getLoc();
    auto convertFpToFp = [&](mlir::Value val, unsigned fromBits,
                             unsigned toBits, mlir::Type toTy) -> mlir::Value {
      if (fromBits == toBits) {
        // TODO: Converting between two floating-point representations with the
        // same bitwidth is not allowed for now.
        mlir::emitError(loc,
                        "cannot implicitly convert between two floating-point "
                        "representations of the same bitwidth");
        return {};
      }
      if (fromBits > toBits)
        return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val);
      return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val);
    };
    // Complex to complex conversion.
    if (fir::isa_complex(convert.value().getType()) &&
        fir::isa_complex(convert.res().getType())) {
      // Special case: handle the conversion of a complex such that both the
      // real and imaginary parts are converted together.
      auto zero = mlir::ArrayAttr::get(convert.getContext(),
                                       rewriter.getI32IntegerAttr(0));
      auto one = mlir::ArrayAttr::get(convert.getContext(),
                                      rewriter.getI32IntegerAttr(1));
      auto ty = convertType(getComplexEleTy(convert.value().getType()));
      auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, zero);
      auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, one);
      auto nt = convertType(getComplexEleTy(convert.res().getType()));
      auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
      auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt);
      auto rc = convertFpToFp(rp, fromBits, toBits, nt);
      auto ic = convertFpToFp(ip, fromBits, toBits, nt);
      auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy);
      auto i1 =
          rewriter.create<mlir::LLVM::InsertValueOp>(loc, toTy, un, rc, zero);
      rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, toTy, i1,
                                                             ic, one);
      return mlir::success();
    }
    // Floating point to floating point conversion.
    if (isFloatingPointTy(fromTy)) {
      if (isFloatingPointTy(toTy)) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        auto v = convertFpToFp(op0, fromBits, toBits, toTy);
        rewriter.replaceOp(convert, v);
        return mlir::success();
      }
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::FPToSIOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::IntegerType>()) {
      // Integer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        assert(fromBits != toBits);
        if (fromBits > toBits) {
          rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0);
          return mlir::success();
        }
        rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to floating point conversion.
      if (isFloatingPointTy(toTy)) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::LLVM::LLVMPointerType>()) {
      // Pointer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Pointer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0);
        return mlir::success();
      }
    }
    return emitError(loc) << "cannot convert " << fromTy << " to " << toTy;
  }
};

/// Lower `fir.dispatch` operation. A virtual call to a method in a dispatch
/// table.
struct DispatchOpConversion : public FIROpConversion<fir::DispatchOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DispatchOp dispatch, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dispatch.getLoc(), "fir.dispatch codegen");
    return failure();
  }
};

/// Lower `fir.dispatch_table` operation. The dispatch table for a Fortran
/// derived type.
struct DispatchTableOpConversion
    : public FIROpConversion<fir::DispatchTableOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DispatchTableOp dispTab, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dispTab.getLoc(), "fir.dispatch_table codegen");
    return failure();
  }
};

/// Lower `fir.dt_entry` operation. An entry in a dispatch table; binds a
/// method-name to a function.
struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DTEntryOp dtEnt, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dtEnt.getLoc(), "fir.dt_entry codegen");
    return failure();
  }
};

/// Lower `fir.global_len` operation.
struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(globalLen.getLoc(), "fir.global_len codegen");
    return failure();
  }
};

/// Lower fir.len_param_index
struct LenParamIndexOpConversion
    : public FIROpConversion<fir::LenParamIndexOp> {
  using FIROpConversion::FIROpConversion;

  // FIXME: this should be specialized by the runtime target
  mlir::LogicalResult
  matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(lenp.getLoc(), "fir.len_param_index codegen");
  }
};

/// Lower `fir.gentypedesc` to a global constant.
struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen");
    return failure();
  }
};
} // namespace

/// Return the LLVMFuncOp corresponding to the standard malloc call.
static mlir::LLVM::LLVMFuncOp
getMalloc(fir::AllocMemOp op, mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  if (mlir::LLVM::LLVMFuncOp mallocFunc =
          module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("malloc"))
    return mallocFunc;
  mlir::OpBuilder moduleBuilder(
      op->getParentOfType<mlir::ModuleOp>().getBodyRegion());
  auto indexType = mlir::IntegerType::get(op.getContext(), 64);
  return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
      rewriter.getUnknownLoc(), "malloc",
      mlir::LLVM::LLVMFunctionType::get(getVoidPtrType(op.getContext()),
                                        indexType,
                                        /*isVarArg=*/false));
}

/// Helper function for generating the LLVM IR that computes the size
/// in bytes for a derived type.
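/// The size is computed with the usual "GEP off a null pointer" idiom: index
/// one element past a null pointer of the type and convert the resulting
/// address to an integer. For illustration only, the emitted LLVM IR dialect
/// is roughly:
///   %0 = llvm.mlir.null : !llvm.ptr<...>
///   %1 = llvm.getelementptr %0[%c1] : (...) -> !llvm.ptr<...>
///   %2 = llvm.ptrtoint %1 : !llvm.ptr<...> to i64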
static mlir::Value
computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy,
                       mlir::ConversionPatternRewriter &rewriter) {
  auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
  mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
  llvm::SmallVector<mlir::Value> args{one};
  auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args);
  return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep);
}

namespace {
/// Lower a `fir.allocmem` instruction into `llvm.call @malloc`
struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type ty = convertType(heap.getType());
    mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter);
    mlir::Location loc = heap.getLoc();
    auto ity = lowerTy().indexType();
    if (auto recTy = fir::unwrapSequenceType(heap.getAllocatedType())
                         .dyn_cast<fir::RecordType>())
      if (recTy.getNumLenParams() != 0) {
        TODO(loc,
             "fir.allocmem codegen of derived type with length parameters");
        return failure();
      }
    mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty);
    for (mlir::Value opnd : adaptor.getOperands())
      size = rewriter.create<mlir::LLVM::MulOp>(
          loc, ity, size, integerCast(loc, rewriter, ity, opnd));
    heap->setAttr("callee", mlir::SymbolRefAttr::get(mallocFunc));
    auto malloc = rewriter.create<mlir::LLVM::CallOp>(
        loc, ::getVoidPtrType(heap.getContext()), size, heap->getAttrs());
    rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(heap, ty,
                                                       malloc.getResult(0));
    return success();
  }

  // Compute the (allocation) size of the allocmem type in bytes.
  mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy,
                                 mlir::ConversionPatternRewriter &rewriter,
                                 mlir::Type llTy) const {
    // Use the primitive size, if available.
    auto ptrTy = llTy.dyn_cast<mlir::LLVM::LLVMPointerType>();
    if (auto size =
            mlir::LLVM::getPrimitiveTypeSizeInBits(ptrTy.getElementType()))
      return genConstantIndex(loc, idxTy, rewriter, size / 8);

    // Otherwise, generate the GEP trick in LLVM IR to compute the size.
    return computeDerivedTypeSize(loc, ptrTy, idxTy, rewriter);
  }
};
} // namespace

/// Return the LLVMFuncOp corresponding to the standard free call.
static mlir::LLVM::LLVMFuncOp
getFree(fir::FreeMemOp op, mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  if (mlir::LLVM::LLVMFuncOp freeFunc =
          module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("free"))
    return freeFunc;
  mlir::OpBuilder moduleBuilder(module.getBodyRegion());
  auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext());
  return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
      rewriter.getUnknownLoc(), "free",
      mlir::LLVM::LLVMFunctionType::get(voidType,
                                        getVoidPtrType(op.getContext()),
                                        /*isVarArg=*/false));
}

namespace {
/// Lower a `fir.freemem` instruction into `llvm.call @free`
struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::LLVM::LLVMFuncOp freeFunc = getFree(freemem, rewriter);
    mlir::Location loc = freemem.getLoc();
    auto bitcast = rewriter.create<mlir::LLVM::BitcastOp>(
        freemem.getLoc(), voidPtrTy(), adaptor.getOperands()[0]);
    freemem->setAttr("callee", mlir::SymbolRefAttr::get(freeFunc));
    rewriter.create<mlir::LLVM::CallOp>(
        loc, mlir::TypeRange{}, mlir::ValueRange{bitcast}, freemem->getAttrs());
    rewriter.eraseOp(freemem);
    return success();
  }
};

/// Convert `fir.end`
struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(firEnd.getLoc(), "fir.end codegen");
    return failure();
  }
};

/// Lower `fir.has_value` operation to `llvm.return` operation.
struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<LLVM::ReturnOp>(op, adaptor.getOperands());
    return success();
  }
};

/// Lower `fir.global` operation to `llvm.global` operation.
/// `fir.insert_on_range` operations are replaced with a constant dense
/// attribute if they are applied on the full range.
struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto tyAttr = convertType(global.getType());
    if (global.getType().isa<fir::BoxType>())
      tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType();
    auto loc = global.getLoc();
    mlir::Attribute initAttr{};
    if (global.initVal())
      initAttr = global.initVal().getValue();
    auto linkage = convertLinkage(global.linkName());
    auto isConst = global.constant().hasValue();
    auto g = rewriter.create<mlir::LLVM::GlobalOp>(
        loc, tyAttr, isConst, linkage, global.getSymName(), initAttr);
    auto &gr = g.getInitializerRegion();
    rewriter.inlineRegionBefore(global.region(), gr, gr.end());
    if (!gr.empty()) {
      // Replace insert_on_range with a constant dense attribute if the
      // initialization is on the full range.
      auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>();
      for (auto insertOp : insertOnRangeOps) {
        if (isFullRange(insertOp.coor(), insertOp.getType())) {
          auto seqTyAttr = convertType(insertOp.getType());
          auto *op = insertOp.val().getDefiningOp();
          auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op);
          if (!constant) {
            auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op);
            if (!convertOp)
              continue;
            constant = cast<mlir::arith::ConstantOp>(
                convertOp.value().getDefiningOp());
          }
          mlir::Type vecType = mlir::VectorType::get(
              insertOp.getType().getShape(), constant.getType());
          auto denseAttr = mlir::DenseElementsAttr::get(
              vecType.cast<ShapedType>(), constant.getValue());
          rewriter.setInsertionPointAfter(insertOp);
          rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>(
              insertOp, seqTyAttr, denseAttr);
        }
      }
    }
    rewriter.eraseOp(global);
    return success();
  }

  bool isFullRange(mlir::DenseIntElementsAttr indexes,
                   fir::SequenceType seqTy) const {
    auto extents = seqTy.getShape();
    if (indexes.size() / 2 != static_cast<int64_t>(extents.size()))
      return false;
    auto cur_index = indexes.value_begin<int64_t>();
    for (unsigned i = 0; i < indexes.size(); i += 2) {
      if (*(cur_index++) != 0)
        return false;
      if (*(cur_index++) != extents[i / 2] - 1)
        return false;
    }
    return true;
  }

  // TODO: String comparison should be avoided. Replace linkName with an
  // enumeration.
  mlir::LLVM::Linkage convertLinkage(Optional<StringRef> optLinkage) const {
    if (optLinkage.hasValue()) {
      auto name = optLinkage.getValue();
      if (name == "internal")
        return mlir::LLVM::Linkage::Internal;
      if (name == "linkonce")
        return mlir::LLVM::Linkage::Linkonce;
      if (name == "common")
        return mlir::LLVM::Linkage::Common;
      if (name == "weak")
        return mlir::LLVM::Linkage::Weak;
    }
    return mlir::LLVM::Linkage::External;
  }
};
} // namespace

static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest,
                        Optional<mlir::ValueRange> destOps,
                        mlir::ConversionPatternRewriter &rewriter,
                        mlir::Block *newBlock) {
  if (destOps.hasValue())
    rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(),
                                          newBlock, mlir::ValueRange());
  else
    rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock);
}

template <typename A, typename B>
static void genBrOp(A caseOp, mlir::Block *dest, Optional<B> destOps,
                    mlir::ConversionPatternRewriter &rewriter) {
  if (destOps.hasValue())
    rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(),
                                                  dest);
  else
    rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest);
}

static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp,
                              mlir::Block *dest,
                              Optional<mlir::ValueRange> destOps,
                              mlir::ConversionPatternRewriter &rewriter) {
  auto *thisBlock = rewriter.getInsertionBlock();
  auto *newBlock = createBlock(rewriter, dest);
  rewriter.setInsertionPointToEnd(thisBlock);
  genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock);
  rewriter.setInsertionPointToEnd(newBlock);
}

namespace {
/// Conversion of `fir.select_case`
///
/// The `fir.select_case` operation is converted to an if-then-else ladder.
/// Depending on the case condition type, one or several comparisons and
/// conditional branches can be generated.
///
/// A point value case such as `case(4)`, a lower bound case such as
/// `case(5:)`, or an upper bound case such as `case(:3)` is converted to a
/// simple comparison between the selector value and the constant value in the
/// case. The block associated with the case condition is then executed if
/// the comparison succeeds; otherwise it branches to the next block with the
/// comparison for the next case condition.
///
/// A closed interval case condition such as `case(7:10)` is converted with a
/// first comparison and conditional branching for the lower bound. If
/// successful, it branches to a second block with the comparison for the
/// upper bound in the same case condition.
///
/// TODO: lowering of CHARACTER type cases is not handled yet.
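///
/// For illustration only, a hypothetical `case (4)` arm on an i32 selector is
/// lowered to a comparison followed by a conditional branch into the ladder:
///   %c = llvm.icmp "eq" %selector, %caseArg : i32
///   llvm.cond_br %c, ^caseBlock, ^nextComparisonBlock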
struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    unsigned conds = caseOp.getNumConditions();
    llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue();
    // Type can be CHARACTER, INTEGER, or LOGICAL (C1145)
    auto ty = caseOp.getSelector().getType();
    if (ty.isa<fir::CharacterType>()) {
      TODO(caseOp.getLoc(), "fir.select_case codegen with character type");
      return failure();
    }
    mlir::Value selector = caseOp.getSelector(adaptor.getOperands());
    auto loc = caseOp.getLoc();
    for (unsigned t = 0; t != conds; ++t) {
      mlir::Block *dest = caseOp.getSuccessor(t);
      llvm::Optional<mlir::ValueRange> destOps =
          caseOp.getSuccessorOperands(adaptor.getOperands(), t);
      llvm::Optional<mlir::ValueRange> cmpOps =
          *caseOp.getCompareOperands(adaptor.getOperands(), t);
      mlir::Value caseArg = *(cmpOps.getValue().begin());
      mlir::Attribute attr = cases[t];
      if (attr.isa<fir::PointIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::LowerBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::UpperBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::ClosedIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        auto *thisBlock = rewriter.getInsertionBlock();
        auto *newBlock1 = createBlock(rewriter, dest);
        auto *newBlock2 = createBlock(rewriter, dest);
        rewriter.setInsertionPointToEnd(thisBlock);
        rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock1);
        mlir::Value caseArg0 = *(cmpOps.getValue().begin() + 1);
        auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0);
        genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock2);
        continue;
      }
      assert(attr.isa<mlir::UnitAttr>());
      assert((t + 1 == conds) && "unit must be last");
      genBrOp(caseOp, dest, destOps, rewriter);
    }
    return success();
  }
};
} // namespace

template <typename OP>
static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select,
                                  typename OP::Adaptor adaptor,
                                  mlir::ConversionPatternRewriter &rewriter) {
  unsigned conds = select.getNumConditions();
  auto cases = select.getCases().getValue();
  mlir::Value selector = adaptor.selector();
  auto loc = select.getLoc();
  assert(conds > 0 && "select must have cases");

  llvm::SmallVector<mlir::Block *> destinations;
  llvm::SmallVector<mlir::ValueRange> destinationsOperands;
  mlir::Block *defaultDestination;
  mlir::ValueRange defaultOperands;
  llvm::SmallVector<int32_t> caseValues;

  for (unsigned t = 0; t != conds; ++t) {
    mlir::Block *dest = select.getSuccessor(t);
    auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t);
    const mlir::Attribute &attr = cases[t];
    if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) {
      destinations.push_back(dest);
      destinationsOperands.push_back(destOps.hasValue() ? *destOps
                                                        : ValueRange());
      caseValues.push_back(intAttr.getInt());
      continue;
    }
    assert(attr.template dyn_cast_or_null<mlir::UnitAttr>());
    assert((t + 1 == conds) && "unit must be last");
    defaultDestination = dest;
    defaultOperands = destOps.hasValue() ? *destOps : ValueRange();
  }

  // LLVM::SwitchOp takes a i32 type for the selector.
  if (select.getSelector().getType() != rewriter.getI32Type())
    selector =
        rewriter.create<LLVM::TruncOp>(loc, rewriter.getI32Type(), selector);

  rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>(
      select, selector,
      /*defaultDestination=*/defaultDestination,
      /*defaultOperands=*/defaultOperands,
      /*caseValues=*/caseValues,
      /*caseDestinations=*/destinations,
      /*caseOperands=*/destinationsOperands,
      /*branchWeights=*/ArrayRef<int32_t>());
}

namespace {
/// conversion of fir::SelectOp to an if-then-else ladder
struct SelectOpConversion : public FIROpConversion<fir::SelectOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter);
    return success();
  }
};

/// `fir.load` --> `llvm.load`
struct LoadOpConversion : public FIROpConversion<fir::LoadOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // fir.box is a special case because it is considered an ssa value in
    // fir, but it is lowered as a pointer to a descriptor. So fir.ref<fir.box>
    // and fir.box end up being the same llvm types and loading a
    // fir.ref<fir.box> is actually a no op in LLVM.
    if (load.getType().isa<fir::BoxType>()) {
      rewriter.replaceOp(load, adaptor.getOperands()[0]);
    } else {
      mlir::Type ty = convertType(load.getType());
      ArrayRef<NamedAttribute> at = load->getAttrs();
      rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>(
          load, ty, adaptor.getOperands(), at);
    }
    return success();
  }
};

/// Lower `fir.no_reassoc` to LLVM IR dialect.
/// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast
/// math flags?
struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]);
    return success();
  }
};

/// Lower `fir.select_type` to LLVM IR dialect.
struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::emitError(select.getLoc(),
                    "fir.select_type should have already been converted");
    return failure();
  }
};

/// conversion of fir::SelectRankOp to an if-then-else ladder
struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter);
    return success();
  }
};

/// `fir.store` --> `llvm.store`
struct StoreOpConversion : public FIROpConversion<fir::StoreOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    if (store.value().getType().isa<fir::BoxType>()) {
      // fir.box value is actually in memory, load it first before storing it.
      mlir::Location loc = store.getLoc();
      mlir::Type boxPtrTy = adaptor.getOperands()[0].getType();
      auto val = rewriter.create<mlir::LLVM::LoadOp>(
          loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(),
          adaptor.getOperands()[0]);
      rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>(
          store, val, adaptor.getOperands()[1]);
    } else {
      rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>(
          store, adaptor.getOperands()[0], adaptor.getOperands()[1]);
    }
    return success();
  }
};

/// convert to LLVM IR dialect `undef`
struct UndefOpConversion : public FIROpConversion<fir::UndefOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UndefOp undef, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>(
        undef, convertType(undef.getType()));
    return success();
  }
};

/// `fir.unreachable` --> `llvm.unreachable`
struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach);
    return success();
  }
};

struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::ZeroOp zero, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type ty = convertType(zero.getType());
    if (ty.isa<mlir::LLVM::LLVMPointerType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty);
    } else if (ty.isa<mlir::IntegerType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
          zero, ty, mlir::IntegerAttr::get(zero.getType(), 0));
    } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
          zero, ty, mlir::FloatAttr::get(zero.getType(), 0.0));
0.0)); 1421 } else { 1422 // TODO: create ConstantAggregateZero for FIR aggregate/array types. 1423 return rewriter.notifyMatchFailure( 1424 zero, 1425 "conversion of fir.zero with aggregate type not implemented yet"); 1426 } 1427 return success(); 1428 } 1429 }; 1430 } // namespace 1431 1432 /// Common base class for embox to descriptor conversion. 1433 template <typename OP> 1434 struct EmboxCommonConversion : public FIROpConversion<OP> { 1435 using FIROpConversion<OP>::FIROpConversion; 1436 1437 // Find the LLVMFuncOp in whose entry block the alloca should be inserted. 1438 // The order to find the LLVMFuncOp is as follows: 1439 // 1. The parent operation of the current block if it is a LLVMFuncOp. 1440 // 2. The first ancestor that is a LLVMFuncOp. 1441 mlir::LLVM::LLVMFuncOp 1442 getFuncForAllocaInsert(mlir::ConversionPatternRewriter &rewriter) const { 1443 mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp(); 1444 return mlir::isa<mlir::LLVM::LLVMFuncOp>(parentOp) 1445 ? mlir::cast<mlir::LLVM::LLVMFuncOp>(parentOp) 1446 : parentOp->getParentOfType<mlir::LLVM::LLVMFuncOp>(); 1447 } 1448 1449 // Generate an alloca of size 1 and type \p toTy. 1450 mlir::LLVM::AllocaOp 1451 genAllocaWithType(mlir::Location loc, mlir::Type toTy, unsigned alignment, 1452 mlir::ConversionPatternRewriter &rewriter) const { 1453 auto thisPt = rewriter.saveInsertionPoint(); 1454 mlir::LLVM::LLVMFuncOp func = getFuncForAllocaInsert(rewriter); 1455 rewriter.setInsertionPointToStart(&func.front()); 1456 auto size = this->genI32Constant(loc, rewriter, 1); 1457 auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, toTy, size, alignment); 1458 rewriter.restoreInsertionPoint(thisPt); 1459 return al; 1460 } 1461 1462 static int getCFIAttr(fir::BoxType boxTy) { 1463 auto eleTy = boxTy.getEleTy(); 1464 if (eleTy.isa<fir::PointerType>()) 1465 return CFI_attribute_pointer; 1466 if (eleTy.isa<fir::HeapType>()) 1467 return CFI_attribute_allocatable; 1468 return CFI_attribute_other; 1469 } 1470 1471 static fir::RecordType unwrapIfDerived(fir::BoxType boxTy) { 1472 return fir::unwrapSequenceType(fir::dyn_cast_ptrOrBoxEleTy(boxTy)) 1473 .template dyn_cast<fir::RecordType>(); 1474 } 1475 static bool isDerivedTypeWithLenParams(fir::BoxType boxTy) { 1476 auto recTy = unwrapIfDerived(boxTy); 1477 return recTy && recTy.getNumLenParams() > 0; 1478 } 1479 static bool isDerivedType(fir::BoxType boxTy) { 1480 return unwrapIfDerived(boxTy) != nullptr; 1481 } 1482 1483 // Get the element size and CFI type code of the boxed value. 
1484 std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode( 1485 mlir::Location loc, mlir::ConversionPatternRewriter &rewriter, 1486 mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const { 1487 auto doInteger = 1488 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1489 int typeCode = fir::integerBitsToTypeCode(width); 1490 return {this->genConstantOffset(loc, rewriter, width / 8), 1491 this->genConstantOffset(loc, rewriter, typeCode)}; 1492 }; 1493 auto doLogical = 1494 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1495 int typeCode = fir::logicalBitsToTypeCode(width); 1496 return {this->genConstantOffset(loc, rewriter, width / 8), 1497 this->genConstantOffset(loc, rewriter, typeCode)}; 1498 }; 1499 auto doFloat = [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1500 int typeCode = fir::realBitsToTypeCode(width); 1501 return {this->genConstantOffset(loc, rewriter, width / 8), 1502 this->genConstantOffset(loc, rewriter, typeCode)}; 1503 }; 1504 auto doComplex = 1505 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1506 auto typeCode = fir::complexBitsToTypeCode(width); 1507 return {this->genConstantOffset(loc, rewriter, width / 8 * 2), 1508 this->genConstantOffset(loc, rewriter, typeCode)}; 1509 }; 1510 auto doCharacter = 1511 [&](unsigned width, 1512 mlir::Value len) -> std::tuple<mlir::Value, mlir::Value> { 1513 auto typeCode = fir::characterBitsToTypeCode(width); 1514 auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode); 1515 if (width == 8) 1516 return {len, typeCodeVal}; 1517 auto byteWidth = this->genConstantOffset(loc, rewriter, width / 8); 1518 auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64); 1519 auto size = 1520 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len); 1521 return {size, typeCodeVal}; 1522 }; 1523 auto getKindMap = [&]() -> fir::KindMapping & { 1524 return this->lowerTy().getKindMap(); 1525 }; 1526 // Pointer-like types. 1527 if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy)) 1528 boxEleTy = eleTy; 1529 // Integer types. 1530 if (fir::isa_integer(boxEleTy)) { 1531 if (auto ty = boxEleTy.dyn_cast<mlir::IntegerType>()) 1532 return doInteger(ty.getWidth()); 1533 auto ty = boxEleTy.cast<fir::IntegerType>(); 1534 return doInteger(getKindMap().getIntegerBitsize(ty.getFKind())); 1535 } 1536 // Floating point types. 1537 if (fir::isa_real(boxEleTy)) { 1538 if (auto ty = boxEleTy.dyn_cast<mlir::FloatType>()) 1539 return doFloat(ty.getWidth()); 1540 auto ty = boxEleTy.cast<fir::RealType>(); 1541 return doFloat(getKindMap().getRealBitsize(ty.getFKind())); 1542 } 1543 // Complex types. 1544 if (fir::isa_complex(boxEleTy)) { 1545 if (auto ty = boxEleTy.dyn_cast<mlir::ComplexType>()) 1546 return doComplex( 1547 ty.getElementType().cast<mlir::FloatType>().getWidth()); 1548 auto ty = boxEleTy.cast<fir::ComplexType>(); 1549 return doComplex(getKindMap().getRealBitsize(ty.getFKind())); 1550 } 1551 // Character types. 1552 if (auto ty = boxEleTy.dyn_cast<fir::CharacterType>()) { 1553 auto charWidth = getKindMap().getCharacterBitsize(ty.getFKind()); 1554 if (ty.getLen() != fir::CharacterType::unknownLen()) { 1555 auto len = this->genConstantOffset(loc, rewriter, ty.getLen()); 1556 return doCharacter(charWidth, len); 1557 } 1558 assert(!lenParams.empty()); 1559 return doCharacter(charWidth, lenParams.back()); 1560 } 1561 // Logical type. 
1562 if (auto ty = boxEleTy.dyn_cast<fir::LogicalType>()) 1563 return doLogical(getKindMap().getLogicalBitsize(ty.getFKind())); 1564 // Array types. 1565 if (auto seqTy = boxEleTy.dyn_cast<fir::SequenceType>()) 1566 return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams); 1567 // Derived-type types. 1568 if (boxEleTy.isa<fir::RecordType>()) { 1569 auto ptrTy = mlir::LLVM::LLVMPointerType::get( 1570 this->lowerTy().convertType(boxEleTy)); 1571 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 1572 auto one = 1573 genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1); 1574 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, 1575 mlir::ValueRange{one}); 1576 auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>( 1577 loc, this->lowerTy().indexType(), gep); 1578 return {eleSize, 1579 this->genConstantOffset(loc, rewriter, fir::derivedToTypeCode())}; 1580 } 1581 // Reference type. 1582 if (fir::isa_ref_type(boxEleTy)) { 1583 // FIXME: use the target pointer size rather than sizeof(void*) 1584 return {this->genConstantOffset(loc, rewriter, sizeof(void *)), 1585 this->genConstantOffset(loc, rewriter, CFI_type_cptr)}; 1586 } 1587 fir::emitFatalError(loc, "unhandled type in fir.box code generation"); 1588 } 1589 1590 /// Basic pattern to write a field in the descriptor 1591 mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter, 1592 mlir::Location loc, mlir::Value dest, 1593 ArrayRef<unsigned> fldIndexes, mlir::Value value, 1594 bool bitcast = false) const { 1595 auto boxTy = dest.getType(); 1596 auto fldTy = this->getBoxEleTy(boxTy, fldIndexes); 1597 if (bitcast) 1598 value = rewriter.create<mlir::LLVM::BitcastOp>(loc, fldTy, value); 1599 else 1600 value = this->integerCast(loc, rewriter, fldTy, value); 1601 SmallVector<mlir::Attribute, 2> attrs; 1602 for (auto i : fldIndexes) 1603 attrs.push_back(rewriter.getI32IntegerAttr(i)); 1604 auto indexesAttr = mlir::ArrayAttr::get(rewriter.getContext(), attrs); 1605 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, boxTy, dest, value, 1606 indexesAttr); 1607 } 1608 1609 inline mlir::Value 1610 insertBaseAddress(mlir::ConversionPatternRewriter &rewriter, 1611 mlir::Location loc, mlir::Value dest, 1612 mlir::Value base) const { 1613 return insertField(rewriter, loc, dest, {kAddrPosInBox}, base, 1614 /*bitCast=*/true); 1615 } 1616 1617 inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter, 1618 mlir::Location loc, mlir::Value dest, 1619 unsigned dim, mlir::Value lb) const { 1620 return insertField(rewriter, loc, dest, 1621 {kDimsPosInBox, dim, kDimLowerBoundPos}, lb); 1622 } 1623 1624 inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter, 1625 mlir::Location loc, mlir::Value dest, 1626 unsigned dim, mlir::Value extent) const { 1627 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos}, 1628 extent); 1629 } 1630 1631 inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter, 1632 mlir::Location loc, mlir::Value dest, 1633 unsigned dim, mlir::Value stride) const { 1634 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos}, 1635 stride); 1636 } 1637 1638 /// Get the address of the type descriptor global variable that was created by 1639 /// lowering for derived type \p recType. 
1640 template <typename BOX> 1641 mlir::Value 1642 getTypeDescriptor(BOX box, mlir::ConversionPatternRewriter &rewriter, 1643 mlir::Location loc, fir::RecordType recType) const { 1644 std::string name = recType.translateNameToFrontendMangledName(); 1645 auto module = box->template getParentOfType<mlir::ModuleOp>(); 1646 if (auto global = module.template lookupSymbol<fir::GlobalOp>(name)) { 1647 auto ty = mlir::LLVM::LLVMPointerType::get( 1648 this->lowerTy().convertType(global.getType())); 1649 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, 1650 global.getSymName()); 1651 } 1652 if (auto global = 1653 module.template lookupSymbol<mlir::LLVM::GlobalOp>(name)) { 1654 // The global may have already been translated to LLVM. 1655 auto ty = mlir::LLVM::LLVMPointerType::get(global.getType()); 1656 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, 1657 global.getSymName()); 1658 } 1659 // The global does not exist in the current translation unit, but may be 1660 // defined elsewhere (e.g., type defined in a module). 1661 // For now, create a extern_weak symbol (will become nullptr if unresolved) 1662 // to support generating code without the front-end generated symbols. 1663 // These could be made available_externally to require the symbols to be 1664 // defined elsewhere and to cause link-time failure otherwise. 1665 auto i8Ty = rewriter.getIntegerType(8); 1666 mlir::OpBuilder modBuilder(module.getBodyRegion()); 1667 // TODO: The symbol should be lowered to constant in lowering, they are read 1668 // only. 1669 modBuilder.create<mlir::LLVM::GlobalOp>(loc, i8Ty, /*isConstant=*/false, 1670 mlir::LLVM::Linkage::ExternWeak, 1671 name, mlir::Attribute{}); 1672 auto ty = mlir::LLVM::LLVMPointerType::get(i8Ty); 1673 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, name); 1674 } 1675 1676 template <typename BOX> 1677 std::tuple<fir::BoxType, mlir::Value, mlir::Value> 1678 consDescriptorPrefix(BOX box, mlir::ConversionPatternRewriter &rewriter, 1679 unsigned rank, mlir::ValueRange lenParams) const { 1680 auto loc = box.getLoc(); 1681 auto boxTy = box.getType().template dyn_cast<fir::BoxType>(); 1682 auto convTy = this->lowerTy().convertBoxType(boxTy, rank); 1683 auto llvmBoxPtrTy = convTy.template cast<mlir::LLVM::LLVMPointerType>(); 1684 auto llvmBoxTy = llvmBoxPtrTy.getElementType(); 1685 mlir::Value descriptor = 1686 rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy); 1687 1688 llvm::SmallVector<mlir::Value> typeparams = lenParams; 1689 if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) { 1690 if (!box.substr().empty() && fir::hasDynamicSize(boxTy.getEleTy())) 1691 typeparams.push_back(box.substr()[1]); 1692 } 1693 1694 // Write each of the fields with the appropriate values 1695 auto [eleSize, cfiTy] = 1696 getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams); 1697 descriptor = 1698 insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize); 1699 descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox}, 1700 this->genI32Constant(loc, rewriter, CFI_VERSION)); 1701 descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox}, 1702 this->genI32Constant(loc, rewriter, rank)); 1703 descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy); 1704 descriptor = 1705 insertField(rewriter, loc, descriptor, {kAttributePosInBox}, 1706 this->genI32Constant(loc, rewriter, getCFIAttr(boxTy))); 1707 const bool hasAddendum = isDerivedType(boxTy); 1708 descriptor = 1709 insertField(rewriter, loc, descriptor, {kF18AddendumPosInBox}, 1710 
                    this->genI32Constant(loc, rewriter, hasAddendum ? 1 : 0));

    if (hasAddendum) {
      auto isArray =
          fir::dyn_cast_ptrOrBoxEleTy(boxTy).template isa<fir::SequenceType>();
      unsigned typeDescFieldId = isArray ? kOptTypePtrPosInBox : kDimsPosInBox;
      auto typeDesc =
          getTypeDescriptor(box, rewriter, loc, unwrapIfDerived(boxTy));
      descriptor =
          insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc,
                      /*bitCast=*/true);
    }

    return {boxTy, descriptor, eleSize};
  }

  /// Compute the base address of a substring given the base address of a
  /// scalar string and the zero-based string lower bound.
  mlir::Value shiftSubstringBase(mlir::ConversionPatternRewriter &rewriter,
                                 mlir::Location loc, mlir::Value base,
                                 mlir::Value lowerBound) const {
    llvm::SmallVector<mlir::Value> gepOperands;
    auto baseType =
        base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType();
    if (baseType.isa<mlir::LLVM::LLVMArrayType>()) {
      auto idxTy = this->lowerTy().indexType();
      mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
      gepOperands.push_back(zero);
    }
    gepOperands.push_back(lowerBound);
    return this->genGEP(loc, base.getType(), rewriter, base, gepOperands);
  }

  /// If the embox is not in a globalOp body, allocate storage for the box,
  /// store the value inside, and return the generated alloca. Return the input
  /// value otherwise.
  mlir::Value
  placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter,
                               mlir::Location loc, mlir::Value boxValue) const {
    auto *thisBlock = rewriter.getInsertionBlock();
    if (thisBlock && mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp()))
      return boxValue;
    auto boxPtrTy = mlir::LLVM::LLVMPointerType::get(boxValue.getType());
    auto alloca = genAllocaWithType(loc, boxPtrTy, defaultAlign, rewriter);
    rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, alloca);
    return alloca;
  }
};

/// Compute the extent of a triplet slice (lb:ub:step).
static mlir::Value
computeTripletExtent(mlir::ConversionPatternRewriter &rewriter,
                     mlir::Location loc, mlir::Value lb, mlir::Value ub,
                     mlir::Value step, mlir::Value zero, mlir::Type type) {
  mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb);
  extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step);
  extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step);
  // If the resulting extent is negative (`ub-lb` and `step` have different
  // signs), zero must be returned instead.
  auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
      loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero);
  return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero);
}

/// Create a generic box on a memory reference. This conversion lowers the
/// abstract box to the appropriate, initialized descriptor.
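/// For illustration only (a sketch; the SSA names and simplified descriptor
/// spelling below are not taken from this file), a scalar embox such as
/// ```
///  %box = fir.embox %ref : (!fir.ref<i32>) -> !fir.box<i32>
/// ```
/// becomes an `llvm.mlir.undef` descriptor value filled by a chain of
/// `llvm.insertvalue` operations (element size, CFI_VERSION, rank 0, type
/// code, attribute, addendum flag, and finally the base address), then stored
/// to an alloca unless the embox appears inside a global initializer.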
1776 struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> { 1777 using EmboxCommonConversion::EmboxCommonConversion; 1778 1779 mlir::LogicalResult 1780 matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor, 1781 mlir::ConversionPatternRewriter &rewriter) const override { 1782 assert(!embox.getShape() && "There should be no dims on this embox op"); 1783 auto [boxTy, dest, eleSize] = 1784 consDescriptorPrefix(embox, rewriter, /*rank=*/0, 1785 /*lenParams=*/adaptor.getOperands().drop_front(1)); 1786 dest = insertBaseAddress(rewriter, embox.getLoc(), dest, 1787 adaptor.getOperands()[0]); 1788 if (isDerivedTypeWithLenParams(boxTy)) { 1789 TODO(embox.getLoc(), 1790 "fir.embox codegen of derived with length parameters"); 1791 return failure(); 1792 } 1793 auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest); 1794 rewriter.replaceOp(embox, result); 1795 return success(); 1796 } 1797 }; 1798 1799 /// Lower `fir.emboxproc` operation. Creates a procedure box. 1800 /// TODO: Part of supporting Fortran 2003 procedure pointers. 1801 struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> { 1802 using FIROpConversion::FIROpConversion; 1803 1804 mlir::LogicalResult 1805 matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor, 1806 mlir::ConversionPatternRewriter &rewriter) const override { 1807 TODO(emboxproc.getLoc(), "fir.emboxproc codegen"); 1808 return failure(); 1809 } 1810 }; 1811 1812 /// Create a generic box on a memory reference. 1813 struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> { 1814 using EmboxCommonConversion::EmboxCommonConversion; 1815 1816 mlir::LogicalResult 1817 matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor, 1818 mlir::ConversionPatternRewriter &rewriter) const override { 1819 auto [boxTy, dest, eleSize] = consDescriptorPrefix( 1820 xbox, rewriter, xbox.getOutRank(), 1821 adaptor.getOperands().drop_front(xbox.lenParamOffset())); 1822 // Generate the triples in the dims field of the descriptor 1823 mlir::ValueRange operands = adaptor.getOperands(); 1824 auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64); 1825 mlir::Value base = operands[0]; 1826 assert(!xbox.shape().empty() && "must have a shape"); 1827 unsigned shapeOffset = xbox.shapeOffset(); 1828 bool hasShift = !xbox.shift().empty(); 1829 unsigned shiftOffset = xbox.shiftOffset(); 1830 bool hasSlice = !xbox.slice().empty(); 1831 unsigned sliceOffset = xbox.sliceOffset(); 1832 mlir::Location loc = xbox.getLoc(); 1833 mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0); 1834 mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1); 1835 mlir::Value prevDim = integerCast(loc, rewriter, i64Ty, eleSize); 1836 mlir::Value prevPtrOff = one; 1837 mlir::Type eleTy = boxTy.getEleTy(); 1838 const unsigned rank = xbox.getRank(); 1839 llvm::SmallVector<mlir::Value> gepArgs; 1840 unsigned constRows = 0; 1841 mlir::Value ptrOffset = zero; 1842 if (auto memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType())) 1843 if (auto seqTy = memEleTy.dyn_cast<fir::SequenceType>()) { 1844 mlir::Type seqEleTy = seqTy.getEleTy(); 1845 // Adjust the element scaling factor if the element is a dependent type. 
1846 if (fir::hasDynamicSize(seqEleTy)) { 1847 if (fir::isa_char(seqEleTy)) { 1848 assert(xbox.lenParams().size() == 1); 1849 prevPtrOff = integerCast(loc, rewriter, i64Ty, 1850 operands[xbox.lenParamOffset()]); 1851 } else if (seqEleTy.isa<fir::RecordType>()) { 1852 TODO(loc, "generate call to calculate size of PDT"); 1853 } else { 1854 return rewriter.notifyMatchFailure(xbox, "unexpected dynamic type"); 1855 } 1856 } else { 1857 constRows = seqTy.getConstantRows(); 1858 } 1859 } 1860 1861 bool hasSubcomp = !xbox.subcomponent().empty(); 1862 mlir::Value stepExpr; 1863 if (hasSubcomp) { 1864 // We have a subcomponent. The step value needs to be the number of 1865 // bytes per element (which is a derived type). 1866 mlir::Type ty0 = base.getType(); 1867 [[maybe_unused]] auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 1868 assert(ptrTy && "expected pointer type"); 1869 mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()); 1870 assert(memEleTy && "expected fir pointer type"); 1871 auto seqTy = memEleTy.dyn_cast<fir::SequenceType>(); 1872 assert(seqTy && "expected sequence type"); 1873 mlir::Type seqEleTy = seqTy.getEleTy(); 1874 auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy)); 1875 stepExpr = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter); 1876 } 1877 1878 // Process the array subspace arguments (shape, shift, etc.), if any, 1879 // translating everything to values in the descriptor wherever the entity 1880 // has a dynamic array dimension. 1881 for (unsigned di = 0, descIdx = 0; di < rank; ++di) { 1882 mlir::Value extent = operands[shapeOffset]; 1883 mlir::Value outerExtent = extent; 1884 bool skipNext = false; 1885 if (hasSlice) { 1886 mlir::Value off = operands[sliceOffset]; 1887 mlir::Value adj = one; 1888 if (hasShift) 1889 adj = operands[shiftOffset]; 1890 auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj); 1891 if (constRows > 0) { 1892 gepArgs.push_back(ao); 1893 --constRows; 1894 } else { 1895 auto dimOff = 1896 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff); 1897 ptrOffset = 1898 rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset); 1899 } 1900 if (mlir::isa_and_nonnull<fir::UndefOp>( 1901 xbox.slice()[3 * di + 1].getDefiningOp())) { 1902 // This dimension contains a scalar expression in the array slice op. 1903 // The dimension is loop invariant, will be dropped, and will not 1904 // appear in the descriptor. 1905 skipNext = true; 1906 } 1907 } 1908 if (!skipNext) { 1909 // store lower bound (normally 0) 1910 mlir::Value lb = zero; 1911 if (eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>()) { 1912 lb = one; 1913 if (hasShift) 1914 lb = operands[shiftOffset]; 1915 } 1916 dest = insertLowerBound(rewriter, loc, dest, descIdx, lb); 1917 1918 // store extent 1919 if (hasSlice) 1920 extent = computeTripletExtent(rewriter, loc, operands[sliceOffset], 1921 operands[sliceOffset + 1], 1922 operands[sliceOffset + 2], zero, i64Ty); 1923 dest = insertExtent(rewriter, loc, dest, descIdx, extent); 1924 1925 // store step (scaled by shaped extent) 1926 1927 mlir::Value step = hasSubcomp ? 
                                        stepExpr : prevDim;
        if (hasSlice)
          step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step,
                                                    operands[sliceOffset + 2]);
        dest = insertStride(rewriter, loc, dest, descIdx, step);
        ++descIdx;
      }

      // compute the stride and offset for the next natural dimension
      prevDim =
          rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevDim, outerExtent);
      if (constRows == 0)
        prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff,
                                                        outerExtent);

      // increment iterators
      ++shapeOffset;
      if (hasShift)
        ++shiftOffset;
      if (hasSlice)
        sliceOffset += 3;
    }
    if (hasSlice || hasSubcomp || !xbox.substr().empty()) {
      llvm::SmallVector<mlir::Value> args = {ptrOffset};
      args.append(gepArgs.rbegin(), gepArgs.rend());
      if (hasSubcomp) {
        // For each field in the path, add the offset to base via the args
        // list. In the most general case, some offsets must be computed since
        // they are not known until runtime.
        if (fir::hasDynamicSize(fir::unwrapSequenceType(
                fir::unwrapPassByRefType(xbox.memref().getType()))))
          TODO(loc, "fir.embox codegen dynamic size component in derived type");
        args.append(operands.begin() + xbox.subcomponentOffset(),
                    operands.begin() + xbox.subcomponentOffset() +
                        xbox.subcomponent().size());
      }
      base =
          rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args);
      if (!xbox.substr().empty())
        base = shiftSubstringBase(rewriter, loc, base,
                                  operands[xbox.substrOffset()]);
    }
    dest = insertBaseAddress(rewriter, loc, dest, base);
    if (isDerivedTypeWithLenParams(boxTy))
      TODO(loc, "fir.embox codegen of derived with length parameters");

    mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest);
    rewriter.replaceOp(xbox, result);
    return success();
  }
};

/// Create a new box given a box reference.
struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> {
  using EmboxCommonConversion::EmboxCommonConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Location loc = rebox.getLoc();
    mlir::Type idxTy = lowerTy().indexType();
    mlir::Value loweredBox = adaptor.getOperands()[0];
    mlir::ValueRange operands = adaptor.getOperands();

    // Create new descriptor and fill its non-shape related data.
1992 llvm::SmallVector<mlir::Value, 2> lenParams; 1993 mlir::Type inputEleTy = getInputEleTy(rebox); 1994 if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) { 1995 mlir::Value len = 1996 loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter); 1997 if (charTy.getFKind() != 1) { 1998 mlir::Value width = 1999 genConstantIndex(loc, idxTy, rewriter, charTy.getFKind()); 2000 len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width); 2001 } 2002 lenParams.emplace_back(len); 2003 } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) { 2004 if (recTy.getNumLenParams() != 0) 2005 TODO(loc, "reboxing descriptor of derived type with length parameters"); 2006 } 2007 auto [boxTy, dest, eleSize] = 2008 consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams); 2009 2010 // Read input extents, strides, and base address 2011 llvm::SmallVector<mlir::Value> inputExtents; 2012 llvm::SmallVector<mlir::Value> inputStrides; 2013 const unsigned inputRank = rebox.getRank(); 2014 for (unsigned i = 0; i < inputRank; ++i) { 2015 mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i); 2016 SmallVector<mlir::Value, 3> dimInfo = 2017 getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter); 2018 inputExtents.emplace_back(dimInfo[1]); 2019 inputStrides.emplace_back(dimInfo[2]); 2020 } 2021 2022 mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType()); 2023 mlir::Value baseAddr = 2024 loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter); 2025 2026 if (!rebox.slice().empty() || !rebox.subcomponent().empty()) 2027 return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides, 2028 operands, rewriter); 2029 return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides, 2030 operands, rewriter); 2031 } 2032 2033 private: 2034 /// Write resulting shape and base address in descriptor, and replace rebox 2035 /// op. 2036 mlir::LogicalResult 2037 finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 2038 mlir::ValueRange lbounds, mlir::ValueRange extents, 2039 mlir::ValueRange strides, 2040 mlir::ConversionPatternRewriter &rewriter) const { 2041 mlir::Location loc = rebox.getLoc(); 2042 mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1); 2043 for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) { 2044 unsigned dim = iter.index(); 2045 mlir::Value lb = lbounds.empty() ? one : lbounds[dim]; 2046 dest = insertLowerBound(rewriter, loc, dest, dim, lb); 2047 dest = insertExtent(rewriter, loc, dest, dim, std::get<0>(iter.value())); 2048 dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value())); 2049 } 2050 dest = insertBaseAddress(rewriter, loc, dest, base); 2051 mlir::Value result = 2052 placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest); 2053 rewriter.replaceOp(rebox, result); 2054 return success(); 2055 } 2056 2057 // Apply slice given the base address, extents and strides of the input box. 2058 mlir::LogicalResult 2059 sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 2060 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 2061 mlir::ValueRange operands, 2062 mlir::ConversionPatternRewriter &rewriter) const { 2063 mlir::Location loc = rebox.getLoc(); 2064 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 2065 mlir::Type idxTy = lowerTy().indexType(); 2066 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2067 // Apply subcomponent and substring shift on base address. 
2068 if (!rebox.subcomponent().empty() || !rebox.substr().empty()) { 2069 // Cast to inputEleTy* so that a GEP can be used. 2070 mlir::Type inputEleTy = getInputEleTy(rebox); 2071 auto llvmElePtrTy = 2072 mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy)); 2073 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base); 2074 2075 if (!rebox.subcomponent().empty()) { 2076 llvm::SmallVector<mlir::Value> gepOperands = {zero}; 2077 for (unsigned i = 0; i < rebox.subcomponent().size(); ++i) 2078 gepOperands.push_back(operands[rebox.subcomponentOffset() + i]); 2079 base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands); 2080 } 2081 if (!rebox.substr().empty()) 2082 base = shiftSubstringBase(rewriter, loc, base, 2083 operands[rebox.substrOffset()]); 2084 } 2085 2086 if (rebox.slice().empty()) 2087 // The array section is of the form array[%component][substring], keep 2088 // the input array extents and strides. 2089 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 2090 inputExtents, inputStrides, rewriter); 2091 2092 // Strides from the fir.box are in bytes. 2093 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2094 2095 // The slice is of the form array(i:j:k)[%component]. Compute new extents 2096 // and strides. 2097 llvm::SmallVector<mlir::Value> slicedExtents; 2098 llvm::SmallVector<mlir::Value> slicedStrides; 2099 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2100 const bool sliceHasOrigins = !rebox.shift().empty(); 2101 unsigned sliceOps = rebox.sliceOffset(); 2102 unsigned shiftOps = rebox.shiftOffset(); 2103 auto strideOps = inputStrides.begin(); 2104 const unsigned inputRank = inputStrides.size(); 2105 for (unsigned i = 0; i < inputRank; 2106 ++i, ++strideOps, ++shiftOps, sliceOps += 3) { 2107 mlir::Value sliceLb = 2108 integerCast(loc, rewriter, idxTy, operands[sliceOps]); 2109 mlir::Value inputStride = *strideOps; // already idxTy 2110 // Apply origin shift: base += (lb-shift)*input_stride 2111 mlir::Value sliceOrigin = 2112 sliceHasOrigins 2113 ? integerCast(loc, rewriter, idxTy, operands[shiftOps]) 2114 : one; 2115 mlir::Value diff = 2116 rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin); 2117 mlir::Value offset = 2118 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride); 2119 base = genGEP(loc, voidPtrTy, rewriter, base, offset); 2120 // Apply upper bound and step if this is a triplet. Otherwise, the 2121 // dimension is dropped and no extents/strides are computed. 2122 mlir::Value upper = operands[sliceOps + 1]; 2123 const bool isTripletSlice = 2124 !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp()); 2125 if (isTripletSlice) { 2126 mlir::Value step = 2127 integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]); 2128 // extent = ub-lb+step/step 2129 mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper); 2130 mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb, 2131 sliceUb, step, zero, idxTy); 2132 slicedExtents.emplace_back(extent); 2133 // stride = step*input_stride 2134 mlir::Value stride = 2135 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride); 2136 slicedStrides.emplace_back(stride); 2137 } 2138 } 2139 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 2140 slicedExtents, slicedStrides, rewriter); 2141 } 2142 2143 /// Apply a new shape to the data described by a box given the base address, 2144 /// extents and strides of the box. 
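  /// As a worked sketch (not taken from a real test): reshaping to new
  /// extents (e1, e2) with input stride s yields strides (s, e1*s), i.e.
  /// stride(i+1) = extent(i) * stride(i), while the lower bounds come from
  /// the rebox shift operands or default to one.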
2145 mlir::LogicalResult 2146 reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 2147 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 2148 mlir::ValueRange operands, 2149 mlir::ConversionPatternRewriter &rewriter) const { 2150 mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(), 2151 operands.begin() + rebox.shiftOffset() + 2152 rebox.shift().size()}; 2153 if (rebox.shape().empty()) { 2154 // Only setting new lower bounds. 2155 return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents, 2156 inputStrides, rewriter); 2157 } 2158 2159 mlir::Location loc = rebox.getLoc(); 2160 // Strides from the fir.box are in bytes. 2161 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 2162 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2163 2164 llvm::SmallVector<mlir::Value> newStrides; 2165 llvm::SmallVector<mlir::Value> newExtents; 2166 mlir::Type idxTy = lowerTy().indexType(); 2167 // First stride from input box is kept. The rest is assumed contiguous 2168 // (it is not possible to reshape otherwise). If the input is scalar, 2169 // which may be OK if all new extents are ones, the stride does not 2170 // matter, use one. 2171 mlir::Value stride = inputStrides.empty() 2172 ? genConstantIndex(loc, idxTy, rewriter, 1) 2173 : inputStrides[0]; 2174 for (unsigned i = 0; i < rebox.shape().size(); ++i) { 2175 mlir::Value rawExtent = operands[rebox.shapeOffset() + i]; 2176 mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent); 2177 newExtents.emplace_back(extent); 2178 newStrides.emplace_back(stride); 2179 // nextStride = extent * stride; 2180 stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride); 2181 } 2182 return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides, 2183 rewriter); 2184 } 2185 2186 /// Return scalar element type of the input box. 2187 static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) { 2188 auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType()); 2189 if (auto seqTy = ty.dyn_cast<fir::SequenceType>()) 2190 return seqTy.getEleTy(); 2191 return ty; 2192 } 2193 }; 2194 2195 // Code shared between insert_value and extract_value Ops. 2196 struct ValueOpCommon { 2197 // Translate the arguments pertaining to any multidimensional array to 2198 // row-major order for LLVM-IR. 
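  // For example (illustrative only): a FIR aggregate lowered to
  // !llvm.array<4 x array<3 x array<2 x i32>>> has its run of array indices
  // reversed here, since the FIR coordinates arrive in Fortran column-major
  // order while LLVM array nesting is row-major.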
2199 static void toRowMajor(SmallVectorImpl<mlir::Attribute> &attrs, 2200 mlir::Type ty) { 2201 assert(ty && "type is null"); 2202 const auto end = attrs.size(); 2203 for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) { 2204 if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 2205 const auto dim = getDimension(seq); 2206 if (dim > 1) { 2207 auto ub = std::min(i + dim, end); 2208 std::reverse(attrs.begin() + i, attrs.begin() + ub); 2209 i += dim - 1; 2210 } 2211 ty = getArrayElementType(seq); 2212 } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) { 2213 ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()]; 2214 } else { 2215 llvm_unreachable("index into invalid type"); 2216 } 2217 } 2218 } 2219 2220 static llvm::SmallVector<mlir::Attribute> 2221 collectIndices(mlir::ConversionPatternRewriter &rewriter, 2222 mlir::ArrayAttr arrAttr) { 2223 llvm::SmallVector<mlir::Attribute> attrs; 2224 for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) { 2225 if (i->isa<mlir::IntegerAttr>()) { 2226 attrs.push_back(*i); 2227 } else { 2228 auto fieldName = i->cast<mlir::StringAttr>().getValue(); 2229 ++i; 2230 auto ty = i->cast<mlir::TypeAttr>().getValue(); 2231 auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName); 2232 attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index)); 2233 } 2234 } 2235 return attrs; 2236 } 2237 2238 private: 2239 static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) { 2240 unsigned result = 1; 2241 for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>(); 2242 eleTy; 2243 eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>()) 2244 ++result; 2245 return result; 2246 } 2247 2248 static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) { 2249 auto eleTy = ty.getElementType(); 2250 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2251 eleTy = arrTy.getElementType(); 2252 return eleTy; 2253 } 2254 }; 2255 2256 namespace { 2257 /// Extract a subobject value from an ssa-value of aggregate type 2258 struct ExtractValueOpConversion 2259 : public FIROpAndTypeConversion<fir::ExtractValueOp>, 2260 public ValueOpCommon { 2261 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2262 2263 mlir::LogicalResult 2264 doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor, 2265 mlir::ConversionPatternRewriter &rewriter) const override { 2266 auto attrs = collectIndices(rewriter, extractVal.coor()); 2267 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 2268 auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs); 2269 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>( 2270 extractVal, ty, adaptor.getOperands()[0], position); 2271 return success(); 2272 } 2273 }; 2274 2275 /// InsertValue is the generalized instruction for the composition of new 2276 /// aggregate type values. 
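/// For instance (a schematic sketch, not taken from this file; index syntax
/// abridged), inserting into the second member of a converted derived type
/// produces something like
/// ```
///  %0 = llvm.insertvalue %val, %agg[1] : !llvm.struct<(i32, f64)>
/// ```
/// where field names in the fir.insert_value coordinate are translated to
/// integer indices via the record type and array coordinates are reordered to
/// row-major.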
struct InsertValueOpConversion
    : public FIROpAndTypeConversion<fir::InsertValueOp>,
      public ValueOpCommon {
  using FIROpAndTypeConversion::FIROpAndTypeConversion;

  mlir::LogicalResult
  doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const override {
    auto attrs = collectIndices(rewriter, insertVal.coor());
    toRowMajor(attrs, adaptor.getOperands()[0].getType());
    auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
        insertVal, ty, adaptor.getOperands()[0], adaptor.getOperands()[1],
        position);
    return success();
  }
};

/// InsertOnRange inserts a value into a sequence over a range of offsets.
struct InsertOnRangeOpConversion
    : public FIROpAndTypeConversion<fir::InsertOnRangeOp> {
  using FIROpAndTypeConversion::FIROpAndTypeConversion;

  // Increments an array of subscripts in a row-major fashion.
  void incrementSubscripts(const SmallVector<uint64_t> &dims,
                           SmallVector<uint64_t> &subscripts) const {
    for (size_t i = dims.size(); i > 0; --i) {
      if (++subscripts[i - 1] < dims[i - 1]) {
        return;
      }
      subscripts[i - 1] = 0;
    }
  }

  mlir::LogicalResult
  doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const override {

    llvm::SmallVector<uint64_t> dims;
    auto type = adaptor.getOperands()[0].getType();

    // Iteratively extract the array dimensions from the type.
    while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
      dims.push_back(t.getNumElements());
      type = t.getElementType();
    }

    SmallVector<uint64_t> lBounds;
    SmallVector<uint64_t> uBounds;

    // Unzip the upper and lower bounds and convert to a row-major format.
    mlir::DenseIntElementsAttr coor = range.coor();
    auto reversedCoor = llvm::reverse(coor.getValues<int64_t>());
    for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) {
      uBounds.push_back(*i++);
      lBounds.push_back(*i);
    }

    auto &subscripts = lBounds;
    auto loc = range.getLoc();
    mlir::Value lastOp = adaptor.getOperands()[0];
    mlir::Value insertVal = adaptor.getOperands()[1];

    auto i64Ty = rewriter.getI64Type();
    while (subscripts != uBounds) {
      // Convert uint64_t's to Attribute's.
      SmallVector<mlir::Attribute> subscriptAttrs;
      for (const auto &subscript : subscripts)
        subscriptAttrs.push_back(IntegerAttr::get(i64Ty, subscript));
      lastOp = rewriter.create<mlir::LLVM::InsertValueOp>(
          loc, ty, lastOp, insertVal,
          ArrayAttr::get(range.getContext(), subscriptAttrs));

      incrementSubscripts(dims, subscripts);
    }

    // Convert uint64_t's to Attribute's.
    SmallVector<mlir::Attribute> subscriptAttrs;
    for (const auto &subscript : subscripts)
      subscriptAttrs.push_back(
          IntegerAttr::get(rewriter.getI64Type(), subscript));
    mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs);

    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
        range, ty, lastOp, insertVal,
        ArrayAttr::get(range.getContext(), arrayRef));

    return success();
  }
};
} // namespace

/// XArrayCoor is the address arithmetic on a dynamically shaped, sliced,
/// shifted etc. array.
2371 /// (See the static restriction on coordinate_of.) array_coor determines the 2372 /// coordinate (location) of a specific element. 2373 struct XArrayCoorOpConversion 2374 : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> { 2375 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2376 2377 mlir::LogicalResult 2378 doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor, 2379 mlir::ConversionPatternRewriter &rewriter) const override { 2380 auto loc = coor.getLoc(); 2381 mlir::ValueRange operands = adaptor.getOperands(); 2382 unsigned rank = coor.getRank(); 2383 assert(coor.indices().size() == rank); 2384 assert(coor.shape().empty() || coor.shape().size() == rank); 2385 assert(coor.shift().empty() || coor.shift().size() == rank); 2386 assert(coor.slice().empty() || coor.slice().size() == 3 * rank); 2387 mlir::Type idxTy = lowerTy().indexType(); 2388 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2389 mlir::Value prevExt = one; 2390 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2391 mlir::Value offset = zero; 2392 const bool isShifted = !coor.shift().empty(); 2393 const bool isSliced = !coor.slice().empty(); 2394 const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>(); 2395 2396 auto indexOps = coor.indices().begin(); 2397 auto shapeOps = coor.shape().begin(); 2398 auto shiftOps = coor.shift().begin(); 2399 auto sliceOps = coor.slice().begin(); 2400 // For each dimension of the array, generate the offset calculation. 2401 for (unsigned i = 0; i < rank; 2402 ++i, ++indexOps, ++shapeOps, ++shiftOps, sliceOps += 3) { 2403 mlir::Value index = 2404 integerCast(loc, rewriter, idxTy, operands[coor.indicesOffset() + i]); 2405 mlir::Value lb = isShifted ? integerCast(loc, rewriter, idxTy, 2406 operands[coor.shiftOffset() + i]) 2407 : one; 2408 mlir::Value step = one; 2409 bool normalSlice = isSliced; 2410 // Compute zero based index in dimension i of the element, applying 2411 // potential triplets and lower bounds. 2412 if (isSliced) { 2413 mlir::Value ub = *(sliceOps + 1); 2414 normalSlice = !mlir::isa_and_nonnull<fir::UndefOp>(ub.getDefiningOp()); 2415 if (normalSlice) 2416 step = integerCast(loc, rewriter, idxTy, *(sliceOps + 2)); 2417 } 2418 auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb); 2419 mlir::Value diff = 2420 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step); 2421 if (normalSlice) { 2422 mlir::Value sliceLb = 2423 integerCast(loc, rewriter, idxTy, operands[coor.sliceOffset() + i]); 2424 auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb); 2425 diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj); 2426 } 2427 // Update the offset given the stride and the zero based index `diff` 2428 // that was just computed. 2429 if (baseIsBoxed) { 2430 // Use stride in bytes from the descriptor. 2431 mlir::Value stride = 2432 loadStrideFromBox(loc, adaptor.getOperands()[0], i, rewriter); 2433 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride); 2434 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2435 } else { 2436 // Use stride computed at last iteration. 2437 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt); 2438 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2439 // Compute next stride assuming contiguity of the base array 2440 // (in element number). 
        auto nextExt =
            integerCast(loc, rewriter, idxTy, operands[coor.shapeOffset() + i]);
        prevExt =
            rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt);
      }
    }

    // Add computed offset to the base address.
    if (baseIsBoxed) {
      // Working with byte offsets. The base address is read from the fir.box
      // and needs to be cast to i8* to do the pointer arithmetic.
      mlir::Type baseTy =
          getBaseAddrTypeFromBox(adaptor.getOperands()[0].getType());
      mlir::Value base =
          loadBaseAddrFromBox(loc, baseTy, adaptor.getOperands()[0], rewriter);
      mlir::Type voidPtrTy = getVoidPtrType();
      base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
      llvm::SmallVector<mlir::Value> args{offset};
      auto addr =
          rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args);
      if (coor.subcomponent().empty()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, baseTy, addr);
        return success();
      }
      auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr);
      args.clear();
      args.push_back(zero);
      if (!coor.lenParams().empty()) {
        // If type parameters are present, then we don't want to use a GEPOp
        // as below, as the LLVM struct type cannot be statically defined.
        TODO(loc, "derived type with type parameters");
      }
      // TODO: array offset subcomponents must be converted to LLVM's
      // row-major layout here.
      for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
        args.push_back(operands[i]);
      rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, baseTy, casted,
                                                     args);
      return success();
    }

    // The array was not boxed, so it must be contiguous. `offset` is therefore
    // an element offset and the base type is kept in the GEP unless the
    // element type size is itself dynamic.
    mlir::Value base;
    if (coor.subcomponent().empty()) {
      // No subcomponent.
      if (!coor.lenParams().empty()) {
        // Type parameters. Adjust element size explicitly.
        auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType());
        assert(eleTy && "result must be a reference-like type");
        if (fir::characterWithDynamicLen(eleTy)) {
          assert(coor.lenParams().size() == 1);
          auto bitsInChar = lowerTy().getKindMap().getCharacterBitsize(
              eleTy.cast<fir::CharacterType>().getFKind());
          auto scaling = genConstantIndex(loc, idxTy, rewriter, bitsInChar / 8);
          auto scaledBySize =
              rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, scaling);
          auto length =
              integerCast(loc, rewriter, idxTy,
                          adaptor.getOperands()[coor.lenParamsOffset()]);
          offset = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, scaledBySize,
                                                      length);
        } else {
          TODO(loc, "compute size of derived type with type parameters");
        }
      }
      // Cast the base address to a pointer to T.
      base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty,
                                                    adaptor.getOperands()[0]);
    } else {
      // Operand #0 must have a pointer type. For subcomponent slicing, we
      // want to cast away the array type and have a plain struct type.
2514 mlir::Type ty0 = adaptor.getOperands()[0].getType(); 2515 auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 2516 assert(ptrTy && "expected pointer type"); 2517 mlir::Type eleTy = ptrTy.getElementType(); 2518 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2519 eleTy = arrTy.getElementType(); 2520 auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy); 2521 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy, 2522 adaptor.getOperands()[0]); 2523 } 2524 SmallVector<mlir::Value> args = {offset}; 2525 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2526 args.push_back(operands[i]); 2527 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args); 2528 return success(); 2529 } 2530 }; 2531 2532 // 2533 // Primitive operations on Complex types 2534 // 2535 2536 /// Generate inline code for complex addition/subtraction 2537 template <typename LLVMOP, typename OPTY> 2538 static mlir::LLVM::InsertValueOp 2539 complexSum(OPTY sumop, mlir::ValueRange opnds, 2540 mlir::ConversionPatternRewriter &rewriter, 2541 fir::LLVMTypeConverter &lowering) { 2542 mlir::Value a = opnds[0]; 2543 mlir::Value b = opnds[1]; 2544 auto loc = sumop.getLoc(); 2545 auto ctx = sumop.getContext(); 2546 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 2547 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 2548 mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType())); 2549 mlir::Type ty = lowering.convertType(sumop.getType()); 2550 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 2551 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 2552 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 2553 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 2554 auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1); 2555 auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1); 2556 auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 2557 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0); 2558 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1); 2559 } 2560 2561 namespace { 2562 struct AddcOpConversion : public FIROpConversion<fir::AddcOp> { 2563 using FIROpConversion::FIROpConversion; 2564 2565 mlir::LogicalResult 2566 matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor, 2567 mlir::ConversionPatternRewriter &rewriter) const override { 2568 // given: (x + iy) + (x' + iy') 2569 // result: (x + x') + i(y + y') 2570 auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(), 2571 rewriter, lowerTy()); 2572 rewriter.replaceOp(addc, r.getResult()); 2573 return success(); 2574 } 2575 }; 2576 2577 struct SubcOpConversion : public FIROpConversion<fir::SubcOp> { 2578 using FIROpConversion::FIROpConversion; 2579 2580 mlir::LogicalResult 2581 matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor, 2582 mlir::ConversionPatternRewriter &rewriter) const override { 2583 // given: (x + iy) - (x' + iy') 2584 // result: (x - x') + i(y - y') 2585 auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(), 2586 rewriter, lowerTy()); 2587 rewriter.replaceOp(subc, r.getResult()); 2588 return success(); 2589 } 2590 }; 2591 2592 /// Inlined complex multiply 2593 struct MulcOpConversion : public FIROpConversion<fir::MulcOp> { 2594 using FIROpConversion::FIROpConversion; 2595 2596 mlir::LogicalResult 2597 matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor, 2598 mlir::ConversionPatternRewriter 
&rewriter) const override { 2599 // TODO: Can we use a call to __muldc3 ? 2600 // given: (x + iy) * (x' + iy') 2601 // result: (xx'-yy')+i(xy'+yx') 2602 mlir::Value a = adaptor.getOperands()[0]; 2603 mlir::Value b = adaptor.getOperands()[1]; 2604 auto loc = mulc.getLoc(); 2605 auto *ctx = mulc.getContext(); 2606 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 2607 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 2608 mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType())); 2609 mlir::Type ty = convertType(mulc.getType()); 2610 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 2611 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 2612 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 2613 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 2614 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 2615 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 2616 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 2617 auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx); 2618 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 2619 auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy); 2620 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 2621 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 2622 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 2623 rewriter.replaceOp(mulc, r0.getResult()); 2624 return success(); 2625 } 2626 }; 2627 2628 /// Inlined complex division 2629 struct DivcOpConversion : public FIROpConversion<fir::DivcOp> { 2630 using FIROpConversion::FIROpConversion; 2631 2632 mlir::LogicalResult 2633 matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor, 2634 mlir::ConversionPatternRewriter &rewriter) const override { 2635 // TODO: Can we use a call to __divdc3 instead? 2636 // Just generate inline code for now. 
    // given: (x + iy) / (x' + iy')
    // result: ((xx'+yy')/d) + i((yx'-xy')/d) where d = x'x' + y'y'
    mlir::Value a = adaptor.getOperands()[0];
    mlir::Value b = adaptor.getOperands()[1];
    auto loc = divc.getLoc();
    auto *ctx = divc.getContext();
    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    mlir::Type eleTy = convertType(getComplexEleTy(divc.getType()));
    mlir::Type ty = convertType(divc.getType());
    auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
    auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
    auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
    auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
    auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
    auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1);
    auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
    auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
    auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
    auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1);
    auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1);
    auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy);
    auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy);
    auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d);
    auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d);
    auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
    auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
    rewriter.replaceOp(divc, r0.getResult());
    return success();
  }
};

/// Inlined complex negation
struct NegcOpConversion : public FIROpConversion<fir::NegcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // given: -(x + iy)
    // result: -x - iy
    auto *ctxt = neg.getContext();
    auto eleTy = convertType(getComplexEleTy(neg.getType()));
    auto ty = convertType(neg.getType());
    auto loc = neg.getLoc();
    mlir::Value o0 = adaptor.getOperands()[0];
    auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
    auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0);
    auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1);
    auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp);
    auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip);
    auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1);
    return success();
  }
};

/// Conversion pattern for operations that must be dead. The information in
/// these operations is used by other operations; at this point they should not
/// have any remaining uses.
/// These operations are normally dead after the pre-codegen pass.
2700 template <typename FromOp> 2701 struct MustBeDeadConversion : public FIROpConversion<FromOp> { 2702 explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering) 2703 : FIROpConversion<FromOp>(lowering) {} 2704 using OpAdaptor = typename FromOp::Adaptor; 2705 2706 mlir::LogicalResult 2707 matchAndRewrite(FromOp op, OpAdaptor adaptor, 2708 mlir::ConversionPatternRewriter &rewriter) const final { 2709 if (!op->getUses().empty()) 2710 return rewriter.notifyMatchFailure(op, "op must be dead"); 2711 rewriter.eraseOp(op); 2712 return success(); 2713 } 2714 }; 2715 2716 struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> { 2717 using MustBeDeadConversion::MustBeDeadConversion; 2718 }; 2719 2720 struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> { 2721 using MustBeDeadConversion::MustBeDeadConversion; 2722 }; 2723 2724 struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> { 2725 using MustBeDeadConversion::MustBeDeadConversion; 2726 }; 2727 2728 struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> { 2729 using MustBeDeadConversion::MustBeDeadConversion; 2730 }; 2731 2732 /// `fir.is_present` --> 2733 /// ``` 2734 /// %0 = llvm.mlir.constant(0 : i64) 2735 /// %1 = llvm.ptrtoint %0 2736 /// %2 = llvm.icmp "ne" %1, %0 : i64 2737 /// ``` 2738 struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> { 2739 using FIROpConversion::FIROpConversion; 2740 2741 mlir::LogicalResult 2742 matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor, 2743 mlir::ConversionPatternRewriter &rewriter) const override { 2744 mlir::Type idxTy = lowerTy().indexType(); 2745 mlir::Location loc = isPresent.getLoc(); 2746 auto ptr = adaptor.getOperands()[0]; 2747 2748 if (isPresent.val().getType().isa<fir::BoxCharType>()) { 2749 auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>(); 2750 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 2751 2752 mlir::Type ty = structTy.getBody()[0]; 2753 mlir::MLIRContext *ctx = isPresent.getContext(); 2754 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 2755 ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0); 2756 } 2757 mlir::LLVM::ConstantOp c0 = 2758 genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0); 2759 auto addr = rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr); 2760 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 2761 isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0); 2762 2763 return success(); 2764 } 2765 }; 2766 2767 /// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of 2768 /// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element 2769 /// in this struct is a pointer. Its type is determined from `KIND`. The 2nd 2770 /// element is the length of the character buffer (`#n`). 
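/// Schematically, for the default character kind (a sketch only; value names
/// are illustrative and types are abridged):
/// ```
///  %0 = llvm.mlir.undef : !llvm.struct<(ptr<i8>, i64)>
///  %1 = llvm.insertvalue %buffer, %0[0]
///  %2 = llvm.insertvalue %len, %1[1]
/// ```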
2771 struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> { 2772 using FIROpConversion::FIROpConversion; 2773 2774 mlir::LogicalResult 2775 matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor, 2776 mlir::ConversionPatternRewriter &rewriter) const override { 2777 mlir::ValueRange operands = adaptor.getOperands(); 2778 MLIRContext *ctx = emboxChar.getContext(); 2779 2780 mlir::Value charBuffer = operands[0]; 2781 mlir::Value charBufferLen = operands[1]; 2782 2783 mlir::Location loc = emboxChar.getLoc(); 2784 mlir::Type llvmStructTy = convertType(emboxChar.getType()); 2785 auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy); 2786 2787 mlir::Type lenTy = 2788 llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1]; 2789 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen); 2790 2791 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 2792 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 2793 auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>( 2794 loc, llvmStructTy, llvmStruct, charBuffer, c0); 2795 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 2796 emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1); 2797 2798 return success(); 2799 } 2800 }; 2801 } // namespace 2802 2803 /// Construct an `llvm.extractvalue` instruction. It will return value at 2804 /// element \p x from \p tuple. 2805 static mlir::LLVM::ExtractValueOp 2806 genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty, 2807 mlir::ConversionPatternRewriter &rewriter, 2808 mlir::MLIRContext *ctx, int x) { 2809 auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x)); 2810 auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x]; 2811 return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx); 2812 } 2813 2814 namespace { 2815 /// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the 2816 /// boxchar. 2817 struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> { 2818 using FIROpConversion::FIROpConversion; 2819 2820 mlir::LogicalResult 2821 matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor, 2822 mlir::ConversionPatternRewriter &rewriter) const override { 2823 mlir::Value boxChar = adaptor.getOperands()[0]; 2824 mlir::Location loc = boxChar.getLoc(); 2825 mlir::MLIRContext *ctx = boxChar.getContext(); 2826 mlir::Type returnValTy = boxCharLen.getResult().getType(); 2827 2828 constexpr int boxcharLenIdx = 1; 2829 mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex( 2830 loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx); 2831 mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len); 2832 rewriter.replaceOp(boxCharLen, lenAfterCast); 2833 2834 return success(); 2835 } 2836 }; 2837 2838 /// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for 2839 /// the character buffer and one for the buffer length. 
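/// An illustrative sketch (SSA names, the KIND=1 buffer type, and the i64
/// length type are assumptions for the example only):
///   %addr, %len = fir.unboxchar %bc : (!fir.boxchar<1>) -> (!fir.ref<!fir.char<1,?>>, i64)
/// becomes, roughly,
///   %0 = llvm.extractvalue %bc[0] : !llvm.struct<(ptr<i8>, i64)>
///   %1 = llvm.extractvalue %bc[1] : !llvm.struct<(ptr<i8>, i64)>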
2840 struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> {
2841 using FIROpConversion::FIROpConversion;
2842
2843 mlir::LogicalResult
2844 matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor,
2845 mlir::ConversionPatternRewriter &rewriter) const override {
2846 MLIRContext *ctx = unboxchar.getContext();
2847
2848 mlir::Type lenTy = convertType(unboxchar.getType(1));
2849 mlir::Value tuple = adaptor.getOperands()[0];
2850 mlir::Type tupleTy = tuple.getType();
2851
2852 mlir::Location loc = unboxchar.getLoc();
2853 mlir::Value ptrToBuffer =
2854 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 0);
2855
2856 mlir::LLVM::ExtractValueOp len =
2857 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 1);
2858 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len);
2859
2860 rewriter.replaceOp(unboxchar,
2861 ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast});
2862 return success();
2863 }
2864 };
2865
2866 /// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its
2867 /// components.
2868 /// TODO: Part of supporting Fortran 2003 procedure pointers.
2869 struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> {
2870 using FIROpConversion::FIROpConversion;
2871
2872 mlir::LogicalResult
2873 matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor,
2874 mlir::ConversionPatternRewriter &rewriter) const override {
2875 TODO(unboxproc.getLoc(), "fir.unboxproc codegen");
2876 return failure();
2877 }
2878 };
2879
2880 /// Convert `fir.field_index`. The conversion depends on whether the size of
2881 /// the record is static or dynamic.
2882 struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> {
2883 using FIROpConversion::FIROpConversion;
2884
2885 // NB: most field references should be resolved by this point
2886 mlir::LogicalResult
2887 matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor,
2888 mlir::ConversionPatternRewriter &rewriter) const override {
2889 auto recTy = field.on_type().cast<fir::RecordType>();
2890 unsigned index = recTy.getFieldIndex(field.field_id());
2891
2892 if (!fir::hasDynamicSize(recTy)) {
2893 // Derived type has compile-time constant layout. Return index of the
2894 // component type in the parent type (to be used in GEP).
2895 rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset(
2896 field.getLoc(), rewriter, index)});
2897 return success();
2898 }
2899
2900 // Derived type has dynamic layout. Call the compiler
2901 // generated function to determine the byte offset of the field at runtime.
2902 // This returns a non-constant.
2903 FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get(
2904 field.getContext(), getOffsetMethodName(recTy, field.field_id()));
2905 NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr);
2906 NamedAttribute fieldAttr = rewriter.getNamedAttr(
2907 "field", mlir::IntegerAttr::get(lowerTy().indexType(), index));
2908 rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
2909 field, lowerTy().offsetType(), adaptor.getOperands(),
2910 llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr});
2911 return success();
2912 }
2913
2914 // Reconstruct the name of the compiler-generated method that calculates the
2915 // field offset.
2916 inline static std::string getOffsetMethodName(fir::RecordType recTy,
2917 llvm::StringRef field) {
2918 return recTy.getName().str() + "P." + field.str() + ".offset";
2919 }
2920 };
2921
2922 /// Convert a (memory) reference into a reference to a subobject.
2923 /// The coordinate_of op is a Swiss army knife operation that can be used on 2924 /// (memory) references to records, arrays, complex, etc. as well as boxes. 2925 /// With unboxed arrays, there is the restriction that the array have a static 2926 /// shape in all but the last column. 2927 struct CoordinateOpConversion 2928 : public FIROpAndTypeConversion<fir::CoordinateOp> { 2929 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2930 2931 mlir::LogicalResult 2932 doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor, 2933 mlir::ConversionPatternRewriter &rewriter) const override { 2934 mlir::ValueRange operands = adaptor.getOperands(); 2935 2936 mlir::Location loc = coor.getLoc(); 2937 mlir::Value base = operands[0]; 2938 mlir::Type baseObjectTy = coor.getBaseType(); 2939 mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2940 assert(objectTy && "fir.coordinate_of expects a reference type"); 2941 2942 // Complex type - basically, extract the real or imaginary part 2943 if (fir::isa_complex(objectTy)) { 2944 mlir::LLVM::ConstantOp c0 = 2945 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2946 SmallVector<mlir::Value> offs = {c0, operands[1]}; 2947 mlir::Value gep = genGEP(loc, ty, rewriter, base, offs); 2948 rewriter.replaceOp(coor, gep); 2949 return success(); 2950 } 2951 2952 // Boxed type - get the base pointer from the box 2953 if (baseObjectTy.dyn_cast<fir::BoxType>()) 2954 return doRewriteBox(coor, ty, operands, loc, rewriter); 2955 2956 // Reference or pointer type 2957 if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType>()) 2958 return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter); 2959 2960 return rewriter.notifyMatchFailure( 2961 coor, "fir.coordinate_of base operand has unsupported type"); 2962 } 2963 2964 unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) const { 2965 return fir::hasDynamicSize(ty) 2966 ? op.getDefiningOp() 2967 ->getAttrOfType<mlir::IntegerAttr>("field") 2968 .getInt() 2969 : getIntValue(op); 2970 } 2971 2972 int64_t getIntValue(mlir::Value val) const { 2973 assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value"); 2974 mlir::Operation *defop = val.getDefiningOp(); 2975 2976 if (auto constOp = dyn_cast<mlir::arith::ConstantIntOp>(defop)) 2977 return constOp.value(); 2978 if (auto llConstOp = dyn_cast<mlir::LLVM::ConstantOp>(defop)) 2979 if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>()) 2980 return attr.getValue().getSExtValue(); 2981 fir::emitFatalError(val.getLoc(), "must be a constant"); 2982 } 2983 2984 bool hasSubDimensions(mlir::Type type) const { 2985 return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>(); 2986 } 2987 2988 /// Check whether this form of `!fir.coordinate_of` is supported. These 2989 /// additional checks are required, because we are not yet able to convert 2990 /// all valid forms of `!fir.coordinate_of`. 2991 /// TODO: Either implement the unsupported cases or extend the verifier 2992 /// in FIROps.cpp instead. 
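/// As an illustrative example of a form that passes this check (the type and
/// SSA names are assumptions for the example only), addressing one component
/// of a derived type held by reference
///   %f = fir.field_index a, !fir.type<t{a:i32,b:f64}>
///   %p = fir.coordinate_of %ref, %f : (!fir.ref<!fir.type<t{a:i32,b:f64}>>, !fir.field) -> !fir.ref<i32>
/// walks exactly one record level and is accepted.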
2993 bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) const {
2994 const std::size_t numOfCoors = coors.size();
2995 std::size_t i = 0;
2996 bool subEle = false;
2997 bool ptrEle = false;
2998 for (; i < numOfCoors; ++i) {
2999 mlir::Value nxtOpnd = coors[i];
3000 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
3001 subEle = true;
3002 i += arrTy.getDimension() - 1;
3003 type = arrTy.getEleTy();
3004 } else if (auto recTy = type.dyn_cast<fir::RecordType>()) {
3005 subEle = true;
3006 type = recTy.getType(getFieldNumber(recTy, nxtOpnd));
3007 } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) {
3008 subEle = true;
3009 type = tupTy.getType(getIntValue(nxtOpnd));
3010 } else {
3011 ptrEle = true;
3012 }
3013 }
3014 if (ptrEle)
3015 return (!subEle) && (numOfCoors == 1);
3016 return subEle && (i >= numOfCoors);
3017 }
3018
3019 /// Walk the abstract memory layout and determine if the path traverses any
3020 /// array types with unknown shape. Return true iff all the array types have a
3021 /// constant shape along the path.
3022 bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) const {
3023 const std::size_t sz = coors.size();
3024 std::size_t i = 0;
3025 for (; i < sz; ++i) {
3026 mlir::Value nxtOpnd = coors[i];
3027 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
3028 if (fir::sequenceWithNonConstantShape(arrTy))
3029 return false;
3030 i += arrTy.getDimension() - 1;
3031 type = arrTy.getEleTy();
3032 } else if (auto strTy = type.dyn_cast<fir::RecordType>()) {
3033 type = strTy.getType(getFieldNumber(strTy, nxtOpnd));
3034 } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) {
3035 type = strTy.getType(getIntValue(nxtOpnd));
3036 } else {
3037 return true;
3038 }
3039 }
3040 return true;
3041 }
3042
3043 private:
3044 mlir::LogicalResult
3045 doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands,
3046 mlir::Location loc,
3047 mlir::ConversionPatternRewriter &rewriter) const {
3048 mlir::Type boxObjTy = coor.getBaseType();
3049 assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`");
3050
3051 mlir::Value boxBaseAddr = operands[0];
3052
3053 // 1. SPECIAL CASE (uses `fir.len_param_index`):
3054 // %box = ... : !fir.box<!fir.type<derived{len1:i32}>>
3055 // %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}>
3056 // %addr = coordinate_of %box, %lenp
3057 if (coor.getNumOperands() == 2) {
3058 mlir::Operation *coordinateDef = (*coor.coor().begin()).getDefiningOp();
3059 if (isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef)) {
3060 TODO(loc,
3061 "fir.coordinate_of - fir.len_param_index is not supported yet");
3062 }
3063 }
3064
3065 // 2. GENERAL CASE:
3066 // 2.1. (`fir.array`)
3067 // %box = ... : !fir.box<!fir.array<?xU>>
3068 // %idx = ... : index
3069 // %resultAddr = coordinate_of %box, %idx : !fir.ref<U>
3070 // 2.2 (`fir.derived`)
3071 // %box = ... : !fir.box<!fir.type<derived_type{field_1:i32}>>
3072 // %idx = ... : i32
3073 // %resultAddr = coordinate_of %box, %idx : !fir.ref<i32>
3074 // 2.3 (`fir.derived` inside `fir.array`)
3075 // %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32, field_2:f32}>>>
3076 // %idx1 = ... : index
3077 // %idx2 = ... : i32
3078 // %resultAddr = coordinate_of %box, %idx1, %idx2 : !fir.ref<f32>
3079 // 2.4. TODO: Either document or disable any other case that the following
3080 // implementation might convert.
3081 mlir::LLVM::ConstantOp c0 = 3082 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 3083 mlir::Value resultAddr = 3084 loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()), 3085 boxBaseAddr, rewriter); 3086 auto currentObjTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy); 3087 mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext()); 3088 3089 for (unsigned i = 1, last = operands.size(); i < last; ++i) { 3090 if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) { 3091 if (i != 1) 3092 TODO(loc, "fir.array nested inside other array and/or derived type"); 3093 // Applies byte strides from the box. Ignore lower bound from box 3094 // since fir.coordinate_of indexes are zero based. Lowering takes care 3095 // of lower bound aspects. This both accounts for dynamically sized 3096 // types and non contiguous arrays. 3097 auto idxTy = lowerTy().indexType(); 3098 mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0); 3099 for (unsigned index = i, lastIndex = i + arrTy.getDimension(); 3100 index < lastIndex; ++index) { 3101 mlir::Value stride = 3102 loadStrideFromBox(loc, operands[0], index - i, rewriter); 3103 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, 3104 operands[index], stride); 3105 off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off); 3106 } 3107 auto voidPtrBase = 3108 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr); 3109 SmallVector<mlir::Value> args{off}; 3110 resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, 3111 voidPtrBase, args); 3112 i += arrTy.getDimension() - 1; 3113 currentObjTy = arrTy.getEleTy(); 3114 } else if (auto recTy = currentObjTy.dyn_cast<fir::RecordType>()) { 3115 auto recRefTy = 3116 mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy)); 3117 mlir::Value nxtOpnd = operands[i]; 3118 auto memObj = 3119 rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr); 3120 llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd}; 3121 currentObjTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 3122 auto llvmCurrentObjTy = lowerTy().convertType(currentObjTy); 3123 auto gep = rewriter.create<mlir::LLVM::GEPOp>( 3124 loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj, 3125 args); 3126 resultAddr = 3127 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep); 3128 } else { 3129 fir::emitFatalError(loc, "unexpected type in coordinate_of"); 3130 } 3131 } 3132 3133 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr); 3134 return success(); 3135 } 3136 3137 mlir::LogicalResult 3138 doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty, 3139 mlir::ValueRange operands, mlir::Location loc, 3140 mlir::ConversionPatternRewriter &rewriter) const { 3141 mlir::Type baseObjectTy = coor.getBaseType(); 3142 3143 mlir::Type currentObjTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 3144 bool hasSubdimension = hasSubDimensions(currentObjTy); 3145 bool columnIsDeferred = !hasSubdimension; 3146 3147 if (!supportedCoordinate(currentObjTy, operands.drop_front(1))) { 3148 TODO(loc, "unsupported combination of coordinate operands"); 3149 } 3150 3151 const bool hasKnownShape = 3152 arraysHaveKnownShape(currentObjTy, operands.drop_front(1)); 3153 3154 // If only the column is `?`, then we can simply place the column value in 3155 // the 0-th GEP position. 
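// For example (illustrative only): with a base of type
// !fir.ref<!fir.array<2x3x?xi32>> every extent except the last ("column")
// dimension is a compile-time constant, so the coordinate for that deferred
// column can still be emitted as the leading GEP index.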
3156 if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) { 3157 if (!hasKnownShape) { 3158 const unsigned sz = arrTy.getDimension(); 3159 if (arraysHaveKnownShape(arrTy.getEleTy(), 3160 operands.drop_front(1 + sz))) { 3161 llvm::ArrayRef<int64_t> shape = arrTy.getShape(); 3162 bool allConst = true; 3163 for (unsigned i = 0; i < sz - 1; ++i) { 3164 if (shape[i] < 0) { 3165 allConst = false; 3166 break; 3167 } 3168 } 3169 if (allConst) 3170 columnIsDeferred = true; 3171 } 3172 } 3173 } 3174 3175 if (fir::hasDynamicSize(fir::unwrapSequenceType(currentObjTy))) { 3176 mlir::emitError( 3177 loc, "fir.coordinate_of with a dynamic element size is unsupported"); 3178 return failure(); 3179 } 3180 3181 if (hasKnownShape || columnIsDeferred) { 3182 SmallVector<mlir::Value> offs; 3183 if (hasKnownShape && hasSubdimension) { 3184 mlir::LLVM::ConstantOp c0 = 3185 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 3186 offs.push_back(c0); 3187 } 3188 const std::size_t sz = operands.size(); 3189 Optional<int> dims; 3190 SmallVector<mlir::Value> arrIdx; 3191 for (std::size_t i = 1; i < sz; ++i) { 3192 mlir::Value nxtOpnd = operands[i]; 3193 3194 if (!currentObjTy) { 3195 mlir::emitError(loc, "invalid coordinate/check failed"); 3196 return failure(); 3197 } 3198 3199 // check if the i-th coordinate relates to an array 3200 if (dims.hasValue()) { 3201 arrIdx.push_back(nxtOpnd); 3202 int dimsLeft = *dims; 3203 if (dimsLeft > 1) { 3204 dims = dimsLeft - 1; 3205 continue; 3206 } 3207 currentObjTy = currentObjTy.cast<fir::SequenceType>().getEleTy(); 3208 // append array range in reverse (FIR arrays are column-major) 3209 offs.append(arrIdx.rbegin(), arrIdx.rend()); 3210 arrIdx.clear(); 3211 dims.reset(); 3212 continue; 3213 } 3214 if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) { 3215 int d = arrTy.getDimension() - 1; 3216 if (d > 0) { 3217 dims = d; 3218 arrIdx.push_back(nxtOpnd); 3219 continue; 3220 } 3221 currentObjTy = currentObjTy.cast<fir::SequenceType>().getEleTy(); 3222 offs.push_back(nxtOpnd); 3223 continue; 3224 } 3225 3226 // check if the i-th coordinate relates to a field 3227 if (auto recTy = currentObjTy.dyn_cast<fir::RecordType>()) 3228 currentObjTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 3229 else if (auto tupTy = currentObjTy.dyn_cast<mlir::TupleType>()) 3230 currentObjTy = tupTy.getType(getIntValue(nxtOpnd)); 3231 else 3232 currentObjTy = nullptr; 3233 3234 offs.push_back(nxtOpnd); 3235 } 3236 if (dims.hasValue()) 3237 offs.append(arrIdx.rbegin(), arrIdx.rend()); 3238 mlir::Value base = operands[0]; 3239 mlir::Value retval = genGEP(loc, ty, rewriter, base, offs); 3240 rewriter.replaceOp(coor, retval); 3241 return success(); 3242 } 3243 3244 mlir::emitError(loc, "fir.coordinate_of base operand has unsupported type"); 3245 return failure(); 3246 } 3247 }; 3248 3249 } // namespace 3250 3251 namespace { 3252 /// Convert FIR dialect to LLVM dialect 3253 /// 3254 /// This pass lowers all FIR dialect operations to LLVM IR dialect. An 3255 /// MLIR pass is used to lower residual Std dialect to LLVM IR dialect. 3256 /// 3257 /// This pass is not complete yet. We are upstreaming it in small patches. 
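/// As a rough usage note (the exact registered flag name is an assumption and
/// may differ between revisions), the pass is normally exercised in-tree via
/// `fir-opt --fir-to-llvm-ir` on a `.fir` file, or programmatically by adding
/// fir::createFIRToLLVMPass() to an mlir::PassManager.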
3258 class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> { 3259 public: 3260 mlir::ModuleOp getModule() { return getOperation(); } 3261 3262 void runOnOperation() override final { 3263 auto mod = getModule(); 3264 if (!forcedTargetTriple.empty()) { 3265 fir::setTargetTriple(mod, forcedTargetTriple); 3266 } 3267 3268 auto *context = getModule().getContext(); 3269 fir::LLVMTypeConverter typeConverter{getModule()}; 3270 mlir::RewritePatternSet pattern(context); 3271 pattern.insert< 3272 AbsentOpConversion, AddcOpConversion, AddrOfOpConversion, 3273 AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion, 3274 BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion, 3275 BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion, 3276 BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeDescOpConversion, 3277 CallOpConversion, CmpcOpConversion, ConstcOpConversion, 3278 ConvertOpConversion, CoordinateOpConversion, DispatchOpConversion, 3279 DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion, 3280 EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion, 3281 ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion, 3282 FreeMemOpConversion, HasValueOpConversion, GenTypeDescOpConversion, 3283 GlobalLenOpConversion, GlobalOpConversion, InsertOnRangeOpConversion, 3284 InsertValueOpConversion, IsPresentOpConversion, 3285 LenParamIndexOpConversion, LoadOpConversion, NegcOpConversion, 3286 NoReassocOpConversion, MulcOpConversion, SelectCaseOpConversion, 3287 SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion, 3288 ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion, 3289 SliceOpConversion, StoreOpConversion, StringLitOpConversion, 3290 SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion, 3291 UndefOpConversion, UnreachableOpConversion, XArrayCoorOpConversion, 3292 XEmboxOpConversion, XReboxOpConversion, ZeroOpConversion>( 3293 typeConverter); 3294 mlir::populateStdToLLVMConversionPatterns(typeConverter, pattern); 3295 mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter, 3296 pattern); 3297 mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter, 3298 pattern); 3299 mlir::ConversionTarget target{*context}; 3300 target.addLegalDialect<mlir::LLVM::LLVMDialect>(); 3301 3302 // required NOPs for applying a full conversion 3303 target.addLegalOp<mlir::ModuleOp>(); 3304 3305 // apply the patterns 3306 if (mlir::failed(mlir::applyFullConversion(getModule(), target, 3307 std::move(pattern)))) { 3308 signalPassFailure(); 3309 } 3310 } 3311 }; 3312 3313 /// Lower from LLVM IR dialect to proper LLVM-IR and dump the module 3314 struct LLVMIRLoweringPass 3315 : public mlir::PassWrapper<LLVMIRLoweringPass, 3316 mlir::OperationPass<mlir::ModuleOp>> { 3317 using Printer = fir::LLVMIRLoweringPrinter; 3318 LLVMIRLoweringPass(raw_ostream &output, Printer p) 3319 : output{output}, printer{p} {} 3320 3321 mlir::ModuleOp getModule() { return getOperation(); } 3322 3323 void runOnOperation() override final { 3324 auto *ctx = getModule().getContext(); 3325 auto optName = getModule().getName(); 3326 llvm::LLVMContext llvmCtx; 3327 if (auto llvmModule = mlir::translateModuleToLLVMIR( 3328 getModule(), llvmCtx, optName ? 
*optName : "FIRModule")) { 3329 printer(*llvmModule, output); 3330 return; 3331 } 3332 3333 mlir::emitError(mlir::UnknownLoc::get(ctx), "could not emit LLVM-IR\n"); 3334 signalPassFailure(); 3335 } 3336 3337 private: 3338 raw_ostream &output; 3339 Printer printer; 3340 }; 3341 3342 } // namespace 3343 3344 std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() { 3345 return std::make_unique<FIRToLLVMLowering>(); 3346 } 3347 3348 std::unique_ptr<mlir::Pass> 3349 fir::createLLVMDialectToLLVMPass(raw_ostream &output, 3350 fir::LLVMIRLoweringPrinter printer) { 3351 return std::make_unique<LLVMIRLoweringPass>(output, printer); 3352 } 3353
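// Illustrative usage sketch: wiring the two factory functions above into a
// hypothetical driver. Everything except createFIRToLLVMPass and
// createLLVMDialectToLLVMPass is an assumption made for the example.
//
//   mlir::PassManager pm(&context);
//   pm.addPass(fir::createFIRToLLVMPass());
//   pm.addPass(fir::createLLVMDialectToLLVMPass(
//       llvm::outs(), [](llvm::Module &m, llvm::raw_ostream &os) {
//         m.print(os, /*AAW=*/nullptr);
//       }));
//   if (mlir::failed(pm.run(module)))
//     llvm::errs() << "FIR to LLVM-IR lowering failed\n";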