1 //===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "flang/Optimizer/CodeGen/CodeGen.h"
14 #include "CGOps.h"
15 #include "PassDetail.h"
16 #include "flang/ISO_Fortran_binding.h"
17 #include "flang/Optimizer/Dialect/FIRAttr.h"
18 #include "flang/Optimizer/Dialect/FIROps.h"
19 #include "flang/Optimizer/Support/InternalNames.h"
20 #include "flang/Optimizer/Support/TypeCode.h"
21 #include "flang/Semantics/runtime-type-info.h"
22 #include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h"
23 #include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h"
24 #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h"
25 #include "mlir/Conversion/LLVMCommon/Pattern.h"
26 #include "mlir/Conversion/MathToLLVM/MathToLLVM.h"
27 #include "mlir/Conversion/MathToLibm/MathToLibm.h"
28 #include "mlir/Conversion/OpenMPToLLVM/ConvertOpenMPToLLVM.h"
29 #include "mlir/IR/BuiltinTypes.h"
30 #include "mlir/IR/Matchers.h"
31 #include "mlir/Pass/Pass.h"
32 #include "mlir/Target/LLVMIR/ModuleTranslation.h"
33 #include "llvm/ADT/ArrayRef.h"
34
35 #define DEBUG_TYPE "flang-codegen"
36
37 // fir::LLVMTypeConverter for converting to LLVM IR dialect types.
38 #include "TypeConverter.h"
39
40 // TODO: This should really be recovered from the specified target.
41 static constexpr unsigned defaultAlign = 8;
42
43 /// `fir.box` attribute values as defined for CFI_attribute_t in
44 /// flang/ISO_Fortran_binding.h.
45 static constexpr unsigned kAttrPointer = CFI_attribute_pointer;
46 static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable;
47
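/// Return the LLVM i8* type, used here as the generic void* equivalent.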
48 static inline mlir::Type getVoidPtrType(mlir::MLIRContext *context) {
49 return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8));
50 }
51
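/// Create an LLVM constant of integer type \p ity holding the value \p offset.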
52 static mlir::LLVM::ConstantOp
53 genConstantIndex(mlir::Location loc, mlir::Type ity,
54 mlir::ConversionPatternRewriter &rewriter,
55 std::int64_t offset) {
56 auto cattr = rewriter.getI64IntegerAttr(offset);
57 return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
58 }
59
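/// Create an empty block before \p insertBefore, in the same region.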
60 static mlir::Block *createBlock(mlir::ConversionPatternRewriter &rewriter,
61 mlir::Block *insertBefore) {
62 assert(insertBefore && "expected valid insertion block");
63 return rewriter.createBlock(insertBefore->getParent(),
64 mlir::Region::iterator(insertBefore));
65 }
66
67 /// Extract constant from a value that must be the result of one of the
68 /// ConstantOp operations.
69 static int64_t getConstantIntValue(mlir::Value val) {
70 assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value");
71 mlir::Operation *defop = val.getDefiningOp();
72
73 if (auto constOp = mlir::dyn_cast<mlir::arith::ConstantIntOp>(defop))
74 return constOp.value();
75 if (auto llConstOp = mlir::dyn_cast<mlir::LLVM::ConstantOp>(defop))
76 if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>())
77 return attr.getValue().getSExtValue();
78 fir::emitFatalError(val.getLoc(), "must be a constant");
79 }
80
81 namespace {
82 /// FIR conversion pattern template
83 template <typename FromOp>
84 class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
85 public:
86 explicit FIROpConversion(fir::LLVMTypeConverter &lowering,
87 const fir::FIRToLLVMPassOptions &options)
88 : mlir::ConvertOpToLLVMPattern<FromOp>(lowering), options(options) {}
89
90 protected:
91 mlir::Type convertType(mlir::Type ty) const {
92 return lowerTy().convertType(ty);
93 }
94 mlir::Type voidPtrTy() const { return getVoidPtrType(); }
95
96 mlir::Type getVoidPtrType() const {
97 return mlir::LLVM::LLVMPointerType::get(
98 mlir::IntegerType::get(&lowerTy().getContext(), 8));
99 }
100
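/// Generate an i32 LLVM constant with the given \p value.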
101 mlir::LLVM::ConstantOp
102 genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
103 int value) const {
104 mlir::Type i32Ty = rewriter.getI32Type();
105 mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value);
106 return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr);
107 }
108
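/// Generate a constant of the type converter's offset type holding \p offset.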
109 mlir::LLVM::ConstantOp
110 genConstantOffset(mlir::Location loc,
111 mlir::ConversionPatternRewriter &rewriter,
112 int offset) const {
113 mlir::Type ity = lowerTy().offsetType();
114 mlir::IntegerAttr cattr = rewriter.getI32IntegerAttr(offset);
115 return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
116 }
117
118 /// Perform an extension or truncation as needed on an integer value. Lowering
119 /// to the specific target may involve some sign-extending or truncation of
120 /// values, particularly to fit them from abstract box types to the
121 /// appropriate reified structures.
122 mlir::Value integerCast(mlir::Location loc,
123 mlir::ConversionPatternRewriter &rewriter,
124 mlir::Type ty, mlir::Value val) const {
125 auto valTy = val.getType();
126 // If the value was not yet lowered, lower its type so that it can
127 // be used in getPrimitiveTypeSizeInBits.
128 if (!valTy.isa<mlir::IntegerType>())
129 valTy = convertType(valTy);
130 auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
131 auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy);
132 if (toSize < fromSize)
133 return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
134 if (toSize > fromSize)
135 return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
136 return val;
137 }
138
139 /// Construct code sequence to extract the specific value from a `fir.box`.
140 mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box,
141 mlir::Type resultTy,
142 mlir::ConversionPatternRewriter &rewriter,
143 unsigned boxValue) const {
144 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
145 mlir::LLVM::ConstantOp cValuePos =
146 genConstantOffset(loc, rewriter, boxValue);
147 auto pty = mlir::LLVM::LLVMPointerType::get(resultTy);
148 auto p = rewriter.create<mlir::LLVM::GEPOp>(
149 loc, pty, box, mlir::ValueRange{c0, cValuePos});
150 return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p);
151 }
152
153 /// Method to construct code sequence to get the triple for dimension `dim`
154 /// from a box.
155 llvm::SmallVector<mlir::Value, 3>
156 getDimsFromBox(mlir::Location loc, llvm::ArrayRef<mlir::Type> retTys,
157 mlir::Value box, mlir::Value dim,
158 mlir::ConversionPatternRewriter &rewriter) const {
159 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
160 mlir::LLVM::ConstantOp cDims =
161 genConstantOffset(loc, rewriter, kDimsPosInBox);
162 mlir::LLVM::LoadOp l0 =
163 loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter);
164 mlir::LLVM::LoadOp l1 =
165 loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter);
166 mlir::LLVM::LoadOp l2 =
167 loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter);
168 return {l0.getResult(), l1.getResult(), l2.getResult()};
169 }
170
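/// GEP into the descriptor \p a with indices (\p c0, \p cDims, \p dim, \p off)
/// and load a value of type \p ty from that address.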
171 mlir::LLVM::LoadOp
172 loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0,
173 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off,
174 mlir::Type ty,
175 mlir::ConversionPatternRewriter &rewriter) const {
176 auto pty = mlir::LLVM::LLVMPointerType::get(ty);
177 mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off);
178 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c);
179 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
180 }
181
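/// Read the stride of dimension \p dim from a fir.box descriptor.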
182 mlir::Value
183 loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim,
184 mlir::ConversionPatternRewriter &rewriter) const {
185 auto idxTy = lowerTy().indexType();
186 auto c0 = genConstantOffset(loc, rewriter, 0);
187 auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox);
188 auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim);
189 return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy,
190 rewriter);
191 }
192
193 /// Read base address from a fir.box. Returned address has type ty.
194 mlir::Value
195 loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
196 mlir::ConversionPatternRewriter &rewriter) const {
197 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
198 mlir::LLVM::ConstantOp cAddr =
199 genConstantOffset(loc, rewriter, kAddrPosInBox);
200 auto pty = mlir::LLVM::LLVMPointerType::get(ty);
201 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr);
202 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
203 }
204
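/// Read the element length (in bytes) field from a fir.box descriptor.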
205 mlir::Value
206 loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
207 mlir::ConversionPatternRewriter &rewriter) const {
208 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
209 mlir::LLVM::ConstantOp cElemLen =
210 genConstantOffset(loc, rewriter, kElemLenPosInBox);
211 auto pty = mlir::LLVM::LLVMPointerType::get(ty);
212 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen);
213 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
214 }
215
216 // Get the element type given an LLVM type that is of the form
217 // [llvm.ptr](array|struct|vector)+ and the provided indexes.
218 static mlir::Type getBoxEleTy(mlir::Type type,
219 llvm::ArrayRef<unsigned> indexes) {
220 if (auto t = type.dyn_cast<mlir::LLVM::LLVMPointerType>())
221 type = t.getElementType();
222 for (auto i : indexes) {
223 if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) {
224 assert(!t.isOpaque() && i < t.getBody().size());
225 type = t.getBody()[i];
226 } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
227 type = t.getElementType();
228 } else if (auto t = type.dyn_cast<mlir::VectorType>()) {
229 type = t.getElementType();
230 } else {
231 fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()),
232 "request for invalid box element type");
233 }
234 }
235 return type;
236 }
237
238 // Return LLVM type of the base address given the LLVM type
239 // of the related descriptor (lowered fir.box type).
240 static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) {
241 return getBoxEleTy(type, {kAddrPosInBox});
242 }
243
244 // Load the attribute from the \p box and perform a check against \p maskValue.
245 // The final comparison is implemented as `(attribute & maskValue) != 0`.
246 mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box,
247 mlir::ConversionPatternRewriter &rewriter,
248 unsigned maskValue) const {
249 mlir::Type attrTy = rewriter.getI32Type();
250 mlir::Value attribute =
251 getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox);
252 mlir::LLVM::ConstantOp attrMask =
253 genConstantOffset(loc, rewriter, maskValue);
254 auto maskRes =
255 rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask);
256 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
257 return rewriter.create<mlir::LLVM::ICmpOp>(
258 loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0);
259 }
260
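/// Generate an llvm.getelementptr from \p base and the given indices.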
261 template <typename... ARGS>
262 mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty,
263 mlir::ConversionPatternRewriter &rewriter,
264 mlir::Value base, ARGS... args) const {
265 llvm::SmallVector<mlir::Value> cv = {args...};
266 return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv);
267 }
268
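/// Access the FIR-specific LLVM type converter this pattern was built with.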
269 fir::LLVMTypeConverter &lowerTy() const {
270 return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter());
271 }
272
273 const fir::FIRToLLVMPassOptions &options;
274 };
275
276 /// FIR conversion pattern template
277 template <typename FromOp>
278 class FIROpAndTypeConversion : public FIROpConversion<FromOp> {
279 public:
280 using FIROpConversion<FromOp>::FIROpConversion;
281 using OpAdaptor = typename FromOp::Adaptor;
282
283 mlir::LogicalResult
284 matchAndRewrite(FromOp op, OpAdaptor adaptor,
285 mlir::ConversionPatternRewriter &rewriter) const final {
286 mlir::Type ty = this->convertType(op.getType());
287 return doRewrite(op, ty, adaptor, rewriter);
288 }
289
290 virtual mlir::LogicalResult
291 doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor,
292 mlir::ConversionPatternRewriter &rewriter) const = 0;
293 };
294 } // namespace
295
296 namespace {
297 /// Lower `fir.address_of` operation to `llvm.address_of` operation.
298 struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> {
299 using FIROpConversion::FIROpConversion;
300
301 mlir::LogicalResult
302 matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor,
303 mlir::ConversionPatternRewriter &rewriter) const override {
304 auto ty = convertType(addr.getType());
305 rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>(
306 addr, ty, addr.getSymbol().getRootReference().getValue());
307 return mlir::success();
308 }
309 };
310 } // namespace
311
312 /// Lookup the function to compute the memory size of this parametric derived
313 /// type. The size of the object may depend on the LEN type parameters of the
314 /// derived type.
315 static mlir::LLVM::LLVMFuncOp
316 getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op,
317 mlir::ConversionPatternRewriter &rewriter) {
318 auto module = op->getParentOfType<mlir::ModuleOp>();
319 std::string name = recTy.getName().str() + "P.mem.size";
320 if (auto memSizeFunc = module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name))
321 return memSizeFunc;
322 TODO(op.getLoc(), "did not find allocation function");
323 }
324
325 // Compute the alloc scale size (constant factors encoded in the array type).
326 // We do this for arrays without a constant interior and for character arrays
327 // with dynamic length, since those are the only ones that get decayed to a
328 // pointer to the element type.
329 template <typename OP>
330 static mlir::Value
331 genAllocationScaleSize(OP op, mlir::Type ity,
332 mlir::ConversionPatternRewriter &rewriter) {
333 mlir::Location loc = op.getLoc();
334 mlir::Type dataTy = op.getInType();
335 mlir::Type scalarType = fir::unwrapSequenceType(dataTy);
336 auto seqTy = dataTy.dyn_cast<fir::SequenceType>();
337 if ((op.hasShapeOperands() && seqTy && !seqTy.hasConstantInterior()) ||
338 (seqTy && fir::characterWithDynamicLen(scalarType))) {
339 fir::SequenceType::Extent constSize = 1;
340 for (auto extent : seqTy.getShape())
341 if (extent != fir::SequenceType::getUnknownExtent())
342 constSize *= extent;
343 if (constSize != 1) {
344 mlir::Value constVal{
345 genConstantIndex(loc, ity, rewriter, constSize).getResult()};
346 return constVal;
347 }
348 }
349 return nullptr;
350 }
351
352 namespace {
353 /// convert to LLVM IR dialect `alloca`
354 struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> {
355 using FIROpConversion::FIROpConversion;
356
357 mlir::LogicalResult
358 matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor,
359 mlir::ConversionPatternRewriter &rewriter) const override {
360 mlir::ValueRange operands = adaptor.getOperands();
361 auto loc = alloc.getLoc();
362 mlir::Type ity = lowerTy().indexType();
363 unsigned i = 0;
364 mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult();
365 mlir::Type ty = convertType(alloc.getType());
366 mlir::Type resultTy = ty;
367 if (alloc.hasLenParams()) {
368 unsigned end = alloc.numLenParams();
369 llvm::SmallVector<mlir::Value> lenParams;
370 for (; i < end; ++i)
371 lenParams.push_back(operands[i]);
372 mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType());
373 if (auto chrTy = scalarType.dyn_cast<fir::CharacterType>()) {
374 fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen(
375 chrTy.getContext(), chrTy.getFKind());
376 ty = mlir::LLVM::LLVMPointerType::get(convertType(rawCharTy));
377 assert(end == 1);
378 size = integerCast(loc, rewriter, ity, lenParams[0]);
379 } else if (auto recTy = scalarType.dyn_cast<fir::RecordType>()) {
380 mlir::LLVM::LLVMFuncOp memSizeFn =
381 getDependentTypeMemSizeFn(recTy, alloc, rewriter);
382 if (!memSizeFn)
383 emitError(loc, "did not find allocation function");
384 mlir::NamedAttribute attr = rewriter.getNamedAttr(
385 "callee", mlir::SymbolRefAttr::get(memSizeFn));
386 auto call = rewriter.create<mlir::LLVM::CallOp>(
387 loc, ity, lenParams, llvm::ArrayRef<mlir::NamedAttribute>{attr});
388 size = call.getResult(0);
389 ty = ::getVoidPtrType(alloc.getContext());
390 } else {
391 return emitError(loc, "unexpected type ")
392 << scalarType << " with type parameters";
393 }
394 }
395 if (auto scaleSize = genAllocationScaleSize(alloc, ity, rewriter))
396 size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
397 if (alloc.hasShapeOperands()) {
398 unsigned end = operands.size();
399 for (; i < end; ++i)
400 size = rewriter.create<mlir::LLVM::MulOp>(
401 loc, ity, size, integerCast(loc, rewriter, ity, operands[i]));
402 }
403 if (ty == resultTy) {
404 // Do not emit the bitcast if ty and resultTy are the same.
405 rewriter.replaceOpWithNewOp<mlir::LLVM::AllocaOp>(alloc, ty, size,
406 alloc->getAttrs());
407 } else {
408 auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, ty, size,
409 alloc->getAttrs());
410 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(alloc, resultTy, al);
411 }
412 return mlir::success();
413 }
414 };
415 } // namespace
416
417 /// Construct an `llvm.extractvalue` instruction. It will return the value at
418 /// element \p x from \p tuple.
419 static mlir::LLVM::ExtractValueOp
420 genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty,
421 mlir::ConversionPatternRewriter &rewriter,
422 mlir::MLIRContext *ctx, int x) {
423 auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x));
424 auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x];
425 return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx);
426 }
427
428 namespace {
429 /// Lower `fir.box_addr` to the sequence of operations to extract the first
430 /// element of the box.
431 struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> {
432 using FIROpConversion::FIROpConversion;
433
434 mlir::LogicalResult
435 matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor,
436 mlir::ConversionPatternRewriter &rewriter) const override {
437 mlir::Value a = adaptor.getOperands()[0];
438 auto loc = boxaddr.getLoc();
439 mlir::Type ty = convertType(boxaddr.getType());
440 if (auto argty = boxaddr.getVal().getType().dyn_cast<fir::BoxType>()) {
441 rewriter.replaceOp(boxaddr, loadBaseAddrFromBox(loc, ty, a, rewriter));
442 } else {
443 auto c0attr = rewriter.getI32IntegerAttr(0);
444 auto c0 = mlir::ArrayAttr::get(boxaddr.getContext(), c0attr);
445 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, ty, a,
446 c0);
447 }
448 return mlir::success();
449 }
450 };
451
452 /// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the
453 /// boxchar.
454 struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> {
455 using FIROpConversion::FIROpConversion;
456
457 mlir::LogicalResult
458 matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor,
459 mlir::ConversionPatternRewriter &rewriter) const override {
460 mlir::Value boxChar = adaptor.getOperands()[0];
461 mlir::Location loc = boxChar.getLoc();
462 mlir::MLIRContext *ctx = boxChar.getContext();
463 mlir::Type returnValTy = boxCharLen.getResult().getType();
464
465 constexpr int boxcharLenIdx = 1;
466 mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex(
467 loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx);
468 mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len);
469 rewriter.replaceOp(boxCharLen, lenAfterCast);
470
471 return mlir::success();
472 }
473 };
474
475 /// Lower `fir.box_dims` to a sequence of operations to extract the requested
476 /// dimension information from the boxed value.
477 /// Results in a triple set of GEPs and loads.
478 struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> {
479 using FIROpConversion::FIROpConversion;
480
481 mlir::LogicalResult
482 matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor,
483 mlir::ConversionPatternRewriter &rewriter) const override {
484 llvm::SmallVector<mlir::Type, 3> resultTypes = {
485 convertType(boxdims.getResult(0).getType()),
486 convertType(boxdims.getResult(1).getType()),
487 convertType(boxdims.getResult(2).getType()),
488 };
489 auto results =
490 getDimsFromBox(boxdims.getLoc(), resultTypes, adaptor.getOperands()[0],
491 adaptor.getOperands()[1], rewriter);
492 rewriter.replaceOp(boxdims, results);
493 return mlir::success();
494 }
495 };
496
497 /// Lower `fir.box_elesize` to a sequence of operations to extract the size of
498 /// an element in the boxed value.
499 struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> {
500 using FIROpConversion::FIROpConversion;
501
502 mlir::LogicalResult
503 matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor,
504 mlir::ConversionPatternRewriter &rewriter) const override {
505 mlir::Value a = adaptor.getOperands()[0];
506 auto loc = boxelesz.getLoc();
507 auto ty = convertType(boxelesz.getType());
508 auto elemSize = getValueFromBox(loc, a, ty, rewriter, kElemLenPosInBox);
509 rewriter.replaceOp(boxelesz, elemSize);
510 return mlir::success();
511 }
512 };
513
514 /// Lower `fir.box_isalloc` to a sequence of operations to determine if the
515 /// boxed value was from an ALLOCATABLE entity.
516 struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> {
517 using FIROpConversion::FIROpConversion;
518
519 mlir::LogicalResult
520 matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor,
521 mlir::ConversionPatternRewriter &rewriter) const override {
522 mlir::Value box = adaptor.getOperands()[0];
523 auto loc = boxisalloc.getLoc();
524 mlir::Value check =
525 genBoxAttributeCheck(loc, box, rewriter, kAttrAllocatable);
526 rewriter.replaceOp(boxisalloc, check);
527 return mlir::success();
528 }
529 };
530
531 /// Lower `fir.box_isarray` to a sequence of operations to determine if the
532 /// boxed value is an array.
533 struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> {
534 using FIROpConversion::FIROpConversion;
535
536 mlir::LogicalResult
537 matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor,
538 mlir::ConversionPatternRewriter &rewriter) const override {
539 mlir::Value a = adaptor.getOperands()[0];
540 auto loc = boxisarray.getLoc();
541 auto rank =
542 getValueFromBox(loc, a, rewriter.getI32Type(), rewriter, kRankPosInBox);
543 auto c0 = genConstantOffset(loc, rewriter, 0);
544 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
545 boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0);
546 return mlir::success();
547 }
548 };
549
550 /// Lower `fir.box_isptr` to a sequence of operations to determine if the
551 /// boxed value was from a POINTER entity.
552 struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> {
553 using FIROpConversion::FIROpConversion;
554
555 mlir::LogicalResult
556 matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor,
557 mlir::ConversionPatternRewriter &rewriter) const override {
558 mlir::Value box = adaptor.getOperands()[0];
559 auto loc = boxisptr.getLoc();
560 mlir::Value check = genBoxAttributeCheck(loc, box, rewriter, kAttrPointer);
561 rewriter.replaceOp(boxisptr, check);
562 return mlir::success();
563 }
564 };
565
566 /// Lower `fir.box_rank` to the sequence of operations to extract the rank from
567 /// the box.
568 struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> {
569 using FIROpConversion::FIROpConversion;
570
571 mlir::LogicalResult
572 matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor,
573 mlir::ConversionPatternRewriter &rewriter) const override {
574 mlir::Value a = adaptor.getOperands()[0];
575 auto loc = boxrank.getLoc();
576 mlir::Type ty = convertType(boxrank.getType());
577 auto result = getValueFromBox(loc, a, ty, rewriter, kRankPosInBox);
578 rewriter.replaceOp(boxrank, result);
579 return mlir::success();
580 }
581 };
582
583 /// Lower `fir.boxproc_host` operation. Extracts the host pointer from the
584 /// boxproc.
585 /// TODO: Part of supporting Fortran 2003 procedure pointers.
586 struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> {
587 using FIROpConversion::FIROpConversion;
588
589 mlir::LogicalResult
590 matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor,
591 mlir::ConversionPatternRewriter &rewriter) const override {
592 TODO(boxprochost.getLoc(), "fir.boxproc_host codegen");
593 return mlir::failure();
594 }
595 };
596
597 /// Lower `fir.box_tdesc` to the sequence of operations to extract the type
598 /// descriptor from the box.
599 struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> {
600 using FIROpConversion::FIROpConversion;
601
602 mlir::LogicalResult
603 matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor,
604 mlir::ConversionPatternRewriter &rewriter) const override {
605 mlir::Value box = adaptor.getOperands()[0];
606 auto loc = boxtypedesc.getLoc();
607 mlir::Type typeTy =
608 fir::getDescFieldTypeModel<kTypePosInBox>()(boxtypedesc.getContext());
609 auto result = getValueFromBox(loc, box, typeTy, rewriter, kTypePosInBox);
610 auto typePtrTy = mlir::LLVM::LLVMPointerType::get(typeTy);
611 rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(boxtypedesc, typePtrTy,
612 result);
613 return mlir::success();
614 }
615 };
616
617 /// Lower `fir.string_lit` to LLVM IR dialect operation.
618 struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> {
619 using FIROpConversion::FIROpConversion;
620
621 mlir::LogicalResult
622 matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor,
623 mlir::ConversionPatternRewriter &rewriter) const override {
624 auto ty = convertType(constop.getType());
625 auto attr = constop.getValue();
626 if (attr.isa<mlir::StringAttr>()) {
627 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr);
628 return mlir::success();
629 }
630
631 auto charTy = constop.getType().cast<fir::CharacterType>();
632 unsigned bits = lowerTy().characterBitsize(charTy);
633 mlir::Type intTy = rewriter.getIntegerType(bits);
634 mlir::Location loc = constop.getLoc();
635 mlir::Value cst = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
636 if (auto arr = attr.dyn_cast<mlir::DenseElementsAttr>()) {
637 cst = rewriter.create<mlir::LLVM::ConstantOp>(loc, ty, arr);
638 } else if (auto arr = attr.dyn_cast<mlir::ArrayAttr>()) {
639 for (auto a : llvm::enumerate(arr.getValue())) {
640 // convert each character to a precise bitsize
641 auto elemAttr = mlir::IntegerAttr::get(
642 intTy,
643 a.value().cast<mlir::IntegerAttr>().getValue().zextOrTrunc(bits));
644 auto elemCst =
645 rewriter.create<mlir::LLVM::ConstantOp>(loc, intTy, elemAttr);
646 auto index = mlir::ArrayAttr::get(
647 constop.getContext(), rewriter.getI32IntegerAttr(a.index()));
648 cst = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, cst, elemCst,
649 index);
650 }
651 } else {
652 return mlir::failure();
653 }
654 rewriter.replaceOp(constop, cst);
655 return mlir::success();
656 }
657 };
658
659 /// `fir.call` -> `llvm.call`
660 struct CallOpConversion : public FIROpConversion<fir::CallOp> {
661 using FIROpConversion::FIROpConversion;
662
663 mlir::LogicalResult
664 matchAndRewrite(fir::CallOp call, OpAdaptor adaptor,
665 mlir::ConversionPatternRewriter &rewriter) const override {
666 llvm::SmallVector<mlir::Type> resultTys;
667 for (auto r : call.getResults())
668 resultTys.push_back(convertType(r.getType()));
669 rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
670 call, resultTys, adaptor.getOperands(), call->getAttrs());
671 return mlir::success();
672 }
673 };
674 } // namespace
675
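/// Return the element (part) type of an MLIR or FIR complex type.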
676 static mlir::Type getComplexEleTy(mlir::Type complex) {
677 if (auto cc = complex.dyn_cast<mlir::ComplexType>())
678 return cc.getElementType();
679 return complex.cast<fir::ComplexType>().getElementType();
680 }
681
682 namespace {
683 /// Compare complex values
684 ///
685 /// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une).
686 ///
687 /// For completeness, all other comparisons are done on the real component only.
688 struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> {
689 using FIROpConversion::FIROpConversion;
690
691 mlir::LogicalResult
692 matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor,
693 mlir::ConversionPatternRewriter &rewriter) const override {
694 mlir::ValueRange operands = adaptor.getOperands();
695 mlir::MLIRContext *ctxt = cmp.getContext();
696 mlir::Type eleTy = convertType(getComplexEleTy(cmp.getLhs().getType()));
697 mlir::Type resTy = convertType(cmp.getType());
698 mlir::Location loc = cmp.getLoc();
699 auto pos0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
700 llvm::SmallVector<mlir::Value, 2> rp = {
701 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0],
702 pos0),
703 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1],
704 pos0)};
705 auto rcp =
706 rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, rp, cmp->getAttrs());
707 auto pos1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
708 llvm::SmallVector<mlir::Value, 2> ip = {
709 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0],
710 pos1),
711 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1],
712 pos1)};
713 auto icp =
714 rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, ip, cmp->getAttrs());
715 llvm::SmallVector<mlir::Value, 2> cp = {rcp, icp};
716 switch (cmp.getPredicate()) {
717 case mlir::arith::CmpFPredicate::OEQ: // .EQ.
718 rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp);
719 break;
720 case mlir::arith::CmpFPredicate::UNE: // .NE.
721 rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp);
722 break;
723 default:
724 rewriter.replaceOp(cmp, rcp.getResult());
725 break;
726 }
727 return mlir::success();
728 }
729 };
730
731 /// Lower complex constants
732 struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> {
733 using FIROpConversion::FIROpConversion;
734
735 mlir::LogicalResult
736 matchAndRewrite(fir::ConstcOp conc, OpAdaptor,
737 mlir::ConversionPatternRewriter &rewriter) const override {
738 mlir::Location loc = conc.getLoc();
739 mlir::MLIRContext *ctx = conc.getContext();
740 mlir::Type ty = convertType(conc.getType());
741 mlir::Type ety = convertType(getComplexEleTy(conc.getType()));
742 auto realFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getReal()));
743 auto realPart =
744 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, realFloatAttr);
745 auto imFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getImaginary()));
746 auto imPart =
747 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, imFloatAttr);
748 auto realIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
749 auto imIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
750 auto undef = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
751 auto setReal = rewriter.create<mlir::LLVM::InsertValueOp>(
752 loc, ty, undef, realPart, realIndex);
753 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(conc, ty, setReal,
754 imPart, imIndex);
755 return mlir::success();
756 }
757
758 inline llvm::APFloat getValue(mlir::Attribute attr) const {
759 return attr.cast<fir::RealAttr>().getValue();
760 }
761 };
762
763 /// convert value of from-type to value of to-type
764 struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> {
765 using FIROpConversion::FIROpConversion;
766
767 static bool isFloatingPointTy(mlir::Type ty) {
768 return ty.isa<mlir::FloatType>();
769 }
770
771 mlir::LogicalResult
772 matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor,
773 mlir::ConversionPatternRewriter &rewriter) const override {
774 auto fromFirTy = convert.getValue().getType();
775 auto toFirTy = convert.getRes().getType();
776 auto fromTy = convertType(fromFirTy);
777 auto toTy = convertType(toFirTy);
778 mlir::Value op0 = adaptor.getOperands()[0];
779 if (fromTy == toTy) {
780 rewriter.replaceOp(convert, op0);
781 return mlir::success();
782 }
783 auto loc = convert.getLoc();
784 auto convertFpToFp = [&](mlir::Value val, unsigned fromBits,
785 unsigned toBits, mlir::Type toTy) -> mlir::Value {
786 if (fromBits == toBits) {
787 // TODO: Converting between two floating-point representations with the
788 // same bitwidth is not allowed for now.
789 mlir::emitError(loc,
790 "cannot implicitly convert between two floating-point "
791 "representations of the same bitwidth");
792 return {};
793 }
794 if (fromBits > toBits)
795 return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val);
796 return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val);
797 };
798 // Complex to complex conversion.
799 if (fir::isa_complex(fromFirTy) && fir::isa_complex(toFirTy)) {
800 // Special case: handle the conversion of a complex such that both the
801 // real and imaginary parts are converted together.
802 auto zero = mlir::ArrayAttr::get(convert.getContext(),
803 rewriter.getI32IntegerAttr(0));
804 auto one = mlir::ArrayAttr::get(convert.getContext(),
805 rewriter.getI32IntegerAttr(1));
806 auto ty = convertType(getComplexEleTy(convert.getValue().getType()));
807 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, zero);
808 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, one);
809 auto nt = convertType(getComplexEleTy(convert.getRes().getType()));
810 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
811 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt);
812 auto rc = convertFpToFp(rp, fromBits, toBits, nt);
813 auto ic = convertFpToFp(ip, fromBits, toBits, nt);
814 auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy);
815 auto i1 =
816 rewriter.create<mlir::LLVM::InsertValueOp>(loc, toTy, un, rc, zero);
817 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, toTy, i1,
818 ic, one);
819 return mlir::success();
820 }
821
822 // Follow UNIX F77 convention for logicals:
823 // 1. underlying integer is not zero => logical is .TRUE.
824 // 2. logical is .TRUE. => set underlying integer to 1.
825 auto i1Type = mlir::IntegerType::get(convert.getContext(), 1);
826 if (fromFirTy.isa<fir::LogicalType>() && toFirTy == i1Type) {
827 mlir::Value zero = genConstantIndex(loc, fromTy, rewriter, 0);
828 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
829 convert, mlir::LLVM::ICmpPredicate::ne, op0, zero);
830 return mlir::success();
831 }
832 if (fromFirTy == i1Type && toFirTy.isa<fir::LogicalType>()) {
833 rewriter.replaceOpWithNewOp<mlir::LLVM::ZExtOp>(convert, toTy, op0);
834 return mlir::success();
835 }
836
837 // Floating point to floating point conversion.
838 if (isFloatingPointTy(fromTy)) {
839 if (isFloatingPointTy(toTy)) {
840 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
841 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
842 auto v = convertFpToFp(op0, fromBits, toBits, toTy);
843 rewriter.replaceOp(convert, v);
844 return mlir::success();
845 }
846 if (toTy.isa<mlir::IntegerType>()) {
847 rewriter.replaceOpWithNewOp<mlir::LLVM::FPToSIOp>(convert, toTy, op0);
848 return mlir::success();
849 }
850 } else if (fromTy.isa<mlir::IntegerType>()) {
851 // Integer to integer conversion.
852 if (toTy.isa<mlir::IntegerType>()) {
853 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
854 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
855 assert(fromBits != toBits);
856 if (fromBits > toBits) {
857 rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0);
858 return mlir::success();
859 }
860 rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0);
861 return mlir::success();
862 }
863 // Integer to floating point conversion.
864 if (isFloatingPointTy(toTy)) {
865 rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0);
866 return mlir::success();
867 }
868 // Integer to pointer conversion.
869 if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
870 rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0);
871 return mlir::success();
872 }
873 } else if (fromTy.isa<mlir::LLVM::LLVMPointerType>()) {
874 // Pointer to integer conversion.
875 if (toTy.isa<mlir::IntegerType>()) {
876 rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0);
877 return mlir::success();
878 }
879 // Pointer to pointer conversion.
880 if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
881 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0);
882 return mlir::success();
883 }
884 }
885 return emitError(loc) << "cannot convert " << fromTy << " to " << toTy;
886 }
887 };
888
889 /// Lower `fir.dispatch` operation. A virtual call to a method in a dispatch
890 /// table.
891 struct DispatchOpConversion : public FIROpConversion<fir::DispatchOp> {
892 using FIROpConversion::FIROpConversion;
893
894 mlir::LogicalResult
895 matchAndRewrite(fir::DispatchOp dispatch, OpAdaptor adaptor,
896 mlir::ConversionPatternRewriter &rewriter) const override {
897 TODO(dispatch.getLoc(), "fir.dispatch codegen");
898 return mlir::failure();
899 }
900 };
901
902 /// Lower `fir.dispatch_table` operation. The dispatch table for a Fortran
903 /// derived type.
904 struct DispatchTableOpConversion
905 : public FIROpConversion<fir::DispatchTableOp> {
906 using FIROpConversion::FIROpConversion;
907
908 mlir::LogicalResult
909 matchAndRewrite(fir::DispatchTableOp dispTab, OpAdaptor adaptor,
910 mlir::ConversionPatternRewriter &rewriter) const override {
911 TODO(dispTab.getLoc(), "fir.dispatch_table codegen");
912 return mlir::failure();
913 }
914 };
915
916 /// Lower `fir.dt_entry` operation. An entry in a dispatch table; binds a
917 /// method-name to a function.
918 struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> {
919 using FIROpConversion::FIROpConversion;
920
921 mlir::LogicalResult
922 matchAndRewrite(fir::DTEntryOp dtEnt, OpAdaptor adaptor,
923 mlir::ConversionPatternRewriter &rewriter) const override {
924 TODO(dtEnt.getLoc(), "fir.dt_entry codegen");
925 return mlir::failure();
926 }
927 };
928
929 /// Lower `fir.global_len` operation.
930 struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> {
931 using FIROpConversion::FIROpConversion;
932
933 mlir::LogicalResult
934 matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor,
935 mlir::ConversionPatternRewriter &rewriter) const override {
936 TODO(globalLen.getLoc(), "fir.global_len codegen");
937 return mlir::failure();
938 }
939 };
940
941 /// Lower fir.len_param_index
942 struct LenParamIndexOpConversion
943 : public FIROpConversion<fir::LenParamIndexOp> {
944 using FIROpConversion::FIROpConversion;
945
946 // FIXME: this should be specialized by the runtime target
947 mlir::LogicalResult
948 matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor,
949 mlir::ConversionPatternRewriter &rewriter) const override {
950 TODO(lenp.getLoc(), "fir.len_param_index codegen");
951 }
952 };
953
954 /// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of
955 /// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element
956 /// in this struct is a pointer. Its type is determined from `KIND`. The 2nd
957 /// element is the length of the character buffer (`#n`).
958 struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> {
959 using FIROpConversion::FIROpConversion;
960
961 mlir::LogicalResult
962 matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor,
963 mlir::ConversionPatternRewriter &rewriter) const override {
964 mlir::ValueRange operands = adaptor.getOperands();
965 auto *ctx = emboxChar.getContext();
966
967 mlir::Value charBuffer = operands[0];
968 mlir::Value charBufferLen = operands[1];
969
970 mlir::Location loc = emboxChar.getLoc();
971 mlir::Type llvmStructTy = convertType(emboxChar.getType());
972 auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy);
973
974 mlir::Type lenTy =
975 llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1];
976 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen);
977
978 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
979 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
980 auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>(
981 loc, llvmStructTy, llvmStruct, charBuffer, c0);
982 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
983 emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1);
984
985 return mlir::success();
986 }
987 };
988 } // namespace
989
990 /// Return the LLVMFuncOp corresponding to the standard malloc call.
991 static mlir::LLVM::LLVMFuncOp
992 getMalloc(fir::AllocMemOp op, mlir::ConversionPatternRewriter &rewriter) {
993 auto module = op->getParentOfType<mlir::ModuleOp>();
994 if (mlir::LLVM::LLVMFuncOp mallocFunc =
995 module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("malloc"))
996 return mallocFunc;
997 mlir::OpBuilder moduleBuilder(
998 op->getParentOfType<mlir::ModuleOp>().getBodyRegion());
999 auto indexType = mlir::IntegerType::get(op.getContext(), 64);
1000 return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
1001 rewriter.getUnknownLoc(), "malloc",
1002 mlir::LLVM::LLVMFunctionType::get(getVoidPtrType(op.getContext()),
1003 indexType,
1004 /*isVarArg=*/false));
1005 }
1006
1007 /// Helper function for generating the LLVM IR that computes the size
1008 /// in bytes for a derived type.
1009 static mlir::Value
1010 computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy,
1011 mlir::ConversionPatternRewriter &rewriter) {
1012 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
1013 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
1014 llvm::SmallVector<mlir::Value> args = {one};
1015 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args);
1016 return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep);
1017 }
1018
1019 namespace {
1020 /// Lower a `fir.allocmem` instruction into `llvm.call @malloc`
1021 struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> {
1022 using FIROpConversion::FIROpConversion;
1023
1024 mlir::LogicalResult
1025 matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor,
1026 mlir::ConversionPatternRewriter &rewriter) const override {
1027 mlir::Type heapTy = heap.getType();
1028 mlir::Type ty = convertType(heapTy);
1029 mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter);
1030 mlir::Location loc = heap.getLoc();
1031 auto ity = lowerTy().indexType();
1032 mlir::Type dataTy = fir::unwrapRefType(heapTy);
1033 if (fir::isRecordWithTypeParameters(fir::unwrapSequenceType(dataTy)))
1034 TODO(loc, "fir.allocmem codegen of derived type with length parameters");
1035 mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty);
1036 if (auto scaleSize = genAllocationScaleSize(heap, ity, rewriter))
1037 size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
1038 for (mlir::Value opnd : adaptor.getOperands())
1039 size = rewriter.create<mlir::LLVM::MulOp>(
1040 loc, ity, size, integerCast(loc, rewriter, ity, opnd));
1041 heap->setAttr("callee", mlir::SymbolRefAttr::get(mallocFunc));
1042 auto malloc = rewriter.create<mlir::LLVM::CallOp>(
1043 loc, ::getVoidPtrType(heap.getContext()), size, heap->getAttrs());
1044 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(heap, ty,
1045 malloc.getResult(0));
1046 return mlir::success();
1047 }
1048
1049 // Compute the (allocation) size of the allocmem type in bytes.
1050 mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy,
1051 mlir::ConversionPatternRewriter &rewriter,
1052 mlir::Type llTy) const {
1053 // Use the primitive size, if available.
1054 auto ptrTy = llTy.dyn_cast<mlir::LLVM::LLVMPointerType>();
1055 if (auto size =
1056 mlir::LLVM::getPrimitiveTypeSizeInBits(ptrTy.getElementType()))
1057 return genConstantIndex(loc, idxTy, rewriter, size / 8);
1058
1059 // Otherwise, generate the GEP trick in LLVM IR to compute the size.
1060 return computeDerivedTypeSize(loc, ptrTy, idxTy, rewriter);
1061 }
1062 };
1063 } // namespace
1064
1065 /// Return the LLVMFuncOp corresponding to the standard free call.
1066 static mlir::LLVM::LLVMFuncOp
1067 getFree(fir::FreeMemOp op, mlir::ConversionPatternRewriter &rewriter) {
1068 auto module = op->getParentOfType<mlir::ModuleOp>();
1069 if (mlir::LLVM::LLVMFuncOp freeFunc =
1070 module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("free"))
1071 return freeFunc;
1072 mlir::OpBuilder moduleBuilder(module.getBodyRegion());
1073 auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext());
1074 return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
1075 rewriter.getUnknownLoc(), "free",
1076 mlir::LLVM::LLVMFunctionType::get(voidType,
1077 getVoidPtrType(op.getContext()),
1078 /*isVarArg=*/false));
1079 }
1080
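/// Return the number of dimensions (array nesting levels) of an LLVM array type.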
1081 static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) {
1082 unsigned result = 1;
1083 for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>();
1084 eleTy;
1085 eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>())
1086 ++result;
1087 return result;
1088 }
1089
1090 namespace {
1091 /// Lower a `fir.freemem` instruction into `llvm.call @free`
1092 struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> {
1093 using FIROpConversion::FIROpConversion;
1094
1095 mlir::LogicalResult
1096 matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor,
1097 mlir::ConversionPatternRewriter &rewriter) const override {
1098 mlir::LLVM::LLVMFuncOp freeFunc = getFree(freemem, rewriter);
1099 mlir::Location loc = freemem.getLoc();
1100 auto bitcast = rewriter.create<mlir::LLVM::BitcastOp>(
1101 freemem.getLoc(), voidPtrTy(), adaptor.getOperands()[0]);
1102 freemem->setAttr("callee", mlir::SymbolRefAttr::get(freeFunc));
1103 rewriter.create<mlir::LLVM::CallOp>(
1104 loc, mlir::TypeRange{}, mlir::ValueRange{bitcast}, freemem->getAttrs());
1105 rewriter.eraseOp(freemem);
1106 return mlir::success();
1107 }
1108 };
1109 } // namespace
1110
1111 /// Common base class for embox to descriptor conversion.
1112 template <typename OP>
1113 struct EmboxCommonConversion : public FIROpConversion<OP> {
1114 using FIROpConversion<OP>::FIROpConversion;
1115
1116 // Find the LLVMFuncOp in whose entry block the alloca should be inserted.
1117 // The order to find the LLVMFuncOp is as follows:
1118 // 1. The parent operation of the current block if it is a LLVMFuncOp.
1119 // 2. The first ancestor that is a LLVMFuncOp.
1120 mlir::LLVM::LLVMFuncOp
1121 getFuncForAllocaInsert(mlir::ConversionPatternRewriter &rewriter) const {
1122 mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
1123 return mlir::isa<mlir::LLVM::LLVMFuncOp>(parentOp)
1124 ? mlir::cast<mlir::LLVM::LLVMFuncOp>(parentOp)
1125 : parentOp->getParentOfType<mlir::LLVM::LLVMFuncOp>();
1126 }
1127
1128 // Generate an alloca of size 1 and type \p toTy.
1129 mlir::LLVM::AllocaOp
1130 genAllocaWithType(mlir::Location loc, mlir::Type toTy, unsigned alignment,
1131 mlir::ConversionPatternRewriter &rewriter) const {
1132 auto thisPt = rewriter.saveInsertionPoint();
1133 mlir::LLVM::LLVMFuncOp func = getFuncForAllocaInsert(rewriter);
1134 rewriter.setInsertionPointToStart(&func.front());
1135 auto size = this->genI32Constant(loc, rewriter, 1);
1136 auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, toTy, size, alignment);
1137 rewriter.restoreInsertionPoint(thisPt);
1138 return al;
1139 }
1140
1141 static int getCFIAttr(fir::BoxType boxTy) {
1142 auto eleTy = boxTy.getEleTy();
1143 if (eleTy.isa<fir::PointerType>())
1144 return CFI_attribute_pointer;
1145 if (eleTy.isa<fir::HeapType>())
1146 return CFI_attribute_allocatable;
1147 return CFI_attribute_other;
1148 }
1149
1150 static fir::RecordType unwrapIfDerived(fir::BoxType boxTy) {
1151 return fir::unwrapSequenceType(fir::dyn_cast_ptrOrBoxEleTy(boxTy))
1152 .template dyn_cast<fir::RecordType>();
1153 }
1154 static bool isDerivedTypeWithLenParams(fir::BoxType boxTy) {
1155 auto recTy = unwrapIfDerived(boxTy);
1156 return recTy && recTy.getNumLenParams() > 0;
1157 }
1158 static bool isDerivedType(fir::BoxType boxTy) {
1159 return static_cast<bool>(unwrapIfDerived(boxTy));
1160 }
1161
1162 // Get the element size and CFI type code of the boxed value.
1163 std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode(
1164 mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
1165 mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const {
1166 auto doInteger =
1167 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
1168 int typeCode = fir::integerBitsToTypeCode(width);
1169 return {this->genConstantOffset(loc, rewriter, width / 8),
1170 this->genConstantOffset(loc, rewriter, typeCode)};
1171 };
1172 auto doLogical =
1173 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
1174 int typeCode = fir::logicalBitsToTypeCode(width);
1175 return {this->genConstantOffset(loc, rewriter, width / 8),
1176 this->genConstantOffset(loc, rewriter, typeCode)};
1177 };
1178 auto doFloat = [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
1179 int typeCode = fir::realBitsToTypeCode(width);
1180 return {this->genConstantOffset(loc, rewriter, width / 8),
1181 this->genConstantOffset(loc, rewriter, typeCode)};
1182 };
1183 auto doComplex =
1184 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
1185 auto typeCode = fir::complexBitsToTypeCode(width);
1186 return {this->genConstantOffset(loc, rewriter, width / 8 * 2),
1187 this->genConstantOffset(loc, rewriter, typeCode)};
1188 };
1189 auto doCharacter =
1190 [&](unsigned width,
1191 mlir::Value len) -> std::tuple<mlir::Value, mlir::Value> {
1192 auto typeCode = fir::characterBitsToTypeCode(width);
1193 auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode);
1194 if (width == 8)
1195 return {len, typeCodeVal};
1196 auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64);
1197 auto byteWidth = genConstantIndex(loc, i64Ty, rewriter, width / 8);
1198 auto len64 = FIROpConversion<OP>::integerCast(loc, rewriter, i64Ty, len);
1199 auto size =
1200 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len64);
1201 return {size, typeCodeVal};
1202 };
1203 auto getKindMap = [&]() -> fir::KindMapping & {
1204 return this->lowerTy().getKindMap();
1205 };
1206 // Pointer-like types.
1207 if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy))
1208 boxEleTy = eleTy;
1209 // Integer types.
1210 if (fir::isa_integer(boxEleTy)) {
1211 if (auto ty = boxEleTy.dyn_cast<mlir::IntegerType>())
1212 return doInteger(ty.getWidth());
1213 auto ty = boxEleTy.cast<fir::IntegerType>();
1214 return doInteger(getKindMap().getIntegerBitsize(ty.getFKind()));
1215 }
1216 // Floating point types.
1217 if (fir::isa_real(boxEleTy)) {
1218 if (auto ty = boxEleTy.dyn_cast<mlir::FloatType>())
1219 return doFloat(ty.getWidth());
1220 auto ty = boxEleTy.cast<fir::RealType>();
1221 return doFloat(getKindMap().getRealBitsize(ty.getFKind()));
1222 }
1223 // Complex types.
1224 if (fir::isa_complex(boxEleTy)) {
1225 if (auto ty = boxEleTy.dyn_cast<mlir::ComplexType>())
1226 return doComplex(
1227 ty.getElementType().cast<mlir::FloatType>().getWidth());
1228 auto ty = boxEleTy.cast<fir::ComplexType>();
1229 return doComplex(getKindMap().getRealBitsize(ty.getFKind()));
1230 }
1231 // Character types.
1232 if (auto ty = boxEleTy.dyn_cast<fir::CharacterType>()) {
1233 auto charWidth = getKindMap().getCharacterBitsize(ty.getFKind());
1234 if (ty.getLen() != fir::CharacterType::unknownLen()) {
1235 auto len = this->genConstantOffset(loc, rewriter, ty.getLen());
1236 return doCharacter(charWidth, len);
1237 }
1238 assert(!lenParams.empty());
1239 return doCharacter(charWidth, lenParams.back());
1240 }
1241 // Logical type.
1242 if (auto ty = boxEleTy.dyn_cast<fir::LogicalType>())
1243 return doLogical(getKindMap().getLogicalBitsize(ty.getFKind()));
1244 // Array types.
1245 if (auto seqTy = boxEleTy.dyn_cast<fir::SequenceType>())
1246 return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams);
1247 // Derived-type types.
1248 if (boxEleTy.isa<fir::RecordType>()) {
1249 auto ptrTy = mlir::LLVM::LLVMPointerType::get(
1250 this->lowerTy().convertType(boxEleTy));
1251 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
1252 auto one =
1253 genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1);
1254 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr,
1255 mlir::ValueRange{one});
1256 auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>(
1257 loc, this->lowerTy().indexType(), gep);
1258 return {eleSize,
1259 this->genConstantOffset(loc, rewriter, fir::derivedToTypeCode())};
1260 }
1261 // Reference type.
1262 if (fir::isa_ref_type(boxEleTy)) {
1263 // FIXME: use the target pointer size rather than sizeof(void*)
1264 return {this->genConstantOffset(loc, rewriter, sizeof(void *)),
1265 this->genConstantOffset(loc, rewriter, CFI_type_cptr)};
1266 }
1267 fir::emitFatalError(loc, "unhandled type in fir.box code generation");
1268 }
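  // Illustrative example: for a complex element whose parts are 64 bits wide,
  // doComplex(64) above yields a byte size of 64 / 8 * 2 = 16 paired with the
  // CFI type code returned by fir::complexBitsToTypeCode(64).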
1269
1270 /// Basic pattern to write a field in the descriptor
1271   mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter,
1272 mlir::Location loc, mlir::Value dest,
1273 llvm::ArrayRef<unsigned> fldIndexes,
1274 mlir::Value value, bool bitcast = false) const {
1275 auto boxTy = dest.getType();
1276 auto fldTy = this->getBoxEleTy(boxTy, fldIndexes);
1277 if (bitcast)
1278 value = rewriter.create<mlir::LLVM::BitcastOp>(loc, fldTy, value);
1279 else
1280 value = this->integerCast(loc, rewriter, fldTy, value);
1281 llvm::SmallVector<mlir::Attribute, 2> attrs;
1282 for (auto i : fldIndexes)
1283 attrs.push_back(rewriter.getI32IntegerAttr(i));
1284 auto indexesAttr = mlir::ArrayAttr::get(rewriter.getContext(), attrs);
1285 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, boxTy, dest, value,
1286 indexesAttr);
1287 }
1288
1289 inline mlir::Value
1290   insertBaseAddress(mlir::ConversionPatternRewriter &rewriter,
1291 mlir::Location loc, mlir::Value dest,
1292 mlir::Value base) const {
1293 return insertField(rewriter, loc, dest, {kAddrPosInBox}, base,
1294 /*bitCast=*/true);
1295 }
1296
1297   inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter,
1298 mlir::Location loc, mlir::Value dest,
1299 unsigned dim, mlir::Value lb) const {
1300 return insertField(rewriter, loc, dest,
1301 {kDimsPosInBox, dim, kDimLowerBoundPos}, lb);
1302 }
1303
1304   inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter,
1305 mlir::Location loc, mlir::Value dest,
1306 unsigned dim, mlir::Value extent) const {
1307 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos},
1308 extent);
1309 }
1310
1311   inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter,
1312 mlir::Location loc, mlir::Value dest,
1313 unsigned dim, mlir::Value stride) const {
1314 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos},
1315 stride);
1316 }
1317
1318 /// Get the address of the type descriptor global variable that was created by
1319 /// lowering for derived type \p recType.
1320 template <typename BOX>
1321 mlir::Value
1322   getTypeDescriptor(BOX box, mlir::ConversionPatternRewriter &rewriter,
1323 mlir::Location loc, fir::RecordType recType) const {
1324 std::string name =
1325 fir::NameUniquer::getTypeDescriptorName(recType.getName());
1326 auto module = box->template getParentOfType<mlir::ModuleOp>();
1327 if (auto global = module.template lookupSymbol<fir::GlobalOp>(name)) {
1328 auto ty = mlir::LLVM::LLVMPointerType::get(
1329 this->lowerTy().convertType(global.getType()));
1330 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
1331 global.getSymName());
1332 }
1333 if (auto global =
1334 module.template lookupSymbol<mlir::LLVM::GlobalOp>(name)) {
1335 // The global may have already been translated to LLVM.
1336 auto ty = mlir::LLVM::LLVMPointerType::get(global.getType());
1337 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
1338 global.getSymName());
1339 }
1340 // Type info derived types do not have type descriptors since they are the
1341 // types defining type descriptors.
1342 if (!this->options.ignoreMissingTypeDescriptors &&
1343 !fir::NameUniquer::belongsToModule(
1344 name, Fortran::semantics::typeInfoBuiltinModule))
1345 fir::emitFatalError(
1346 loc, "runtime derived type info descriptor was not generated");
1347 return rewriter.create<mlir::LLVM::NullOp>(
1348 loc, ::getVoidPtrType(box.getContext()));
1349 }
1350
1351 template <typename BOX>
1352 std::tuple<fir::BoxType, mlir::Value, mlir::Value>
1353   consDescriptorPrefix(BOX box, mlir::ConversionPatternRewriter &rewriter,
1354 unsigned rank, mlir::ValueRange lenParams) const {
1355 auto loc = box.getLoc();
1356 auto boxTy = box.getType().template dyn_cast<fir::BoxType>();
1357 auto convTy = this->lowerTy().convertBoxType(boxTy, rank);
1358 auto llvmBoxPtrTy = convTy.template cast<mlir::LLVM::LLVMPointerType>();
1359 auto llvmBoxTy = llvmBoxPtrTy.getElementType();
1360 mlir::Value descriptor =
1361 rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy);
1362
1363 llvm::SmallVector<mlir::Value> typeparams = lenParams;
1364 if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) {
1365 if (!box.substr().empty() && fir::hasDynamicSize(boxTy.getEleTy()))
1366 typeparams.push_back(box.substr()[1]);
1367 }
1368
1369 // Write each of the fields with the appropriate values
1370 auto [eleSize, cfiTy] =
1371 getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams);
1372 descriptor =
1373 insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize);
1374 descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox},
1375 this->genI32Constant(loc, rewriter, CFI_VERSION));
1376 descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox},
1377 this->genI32Constant(loc, rewriter, rank));
1378 descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy);
1379 descriptor =
1380 insertField(rewriter, loc, descriptor, {kAttributePosInBox},
1381 this->genI32Constant(loc, rewriter, getCFIAttr(boxTy)));
1382 const bool hasAddendum = isDerivedType(boxTy);
1383 descriptor =
1384 insertField(rewriter, loc, descriptor, {kF18AddendumPosInBox},
1385 this->genI32Constant(loc, rewriter, hasAddendum ? 1 : 0));
1386
1387 if (hasAddendum) {
1388 auto isArray =
1389 fir::dyn_cast_ptrOrBoxEleTy(boxTy).template isa<fir::SequenceType>();
1390 unsigned typeDescFieldId = isArray ? kOptTypePtrPosInBox : kDimsPosInBox;
1391 auto typeDesc =
1392 getTypeDescriptor(box, rewriter, loc, unwrapIfDerived(boxTy));
1393 descriptor =
1394 insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc,
1395 /*bitCast=*/true);
1396 }
1397
1398 return {boxTy, descriptor, eleSize};
1399 }
1400
1401 // Compute the base address of a fir.box given the indices from the slice.
1402 // The indices from the "outer" dimensions (every dimension after the first
1403 // one (included) that is not a compile-time constant) must have been
1404 // multiplied with the related extents and added together into \p outerOffset.
1405 mlir::Value
1406   genBoxOffsetGep(mlir::ConversionPatternRewriter &rewriter, mlir::Location loc,
1407 mlir::Value base, mlir::Value outerOffset,
1408 mlir::ValueRange cstInteriorIndices,
1409 mlir::ValueRange componentIndices,
1410 llvm::Optional<mlir::Value> substringOffset) const {
1411 llvm::SmallVector<mlir::Value> gepArgs{outerOffset};
1412 mlir::Type resultTy =
1413 base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType();
1414 // Fortran is column major, llvm GEP is row major: reverse the indices here.
1415 for (mlir::Value interiorIndex : llvm::reverse(cstInteriorIndices)) {
1416 auto arrayTy = resultTy.dyn_cast<mlir::LLVM::LLVMArrayType>();
1417 if (!arrayTy)
1418 fir::emitFatalError(
1419 loc,
1420 "corrupted GEP generated being generated in fir.embox/fir.rebox");
1421 resultTy = arrayTy.getElementType();
1422 gepArgs.push_back(interiorIndex);
1423 }
1424 for (mlir::Value componentIndex : componentIndices) {
1425 // Component indices can be field index to select a component, or array
1426 // index, to select an element in an array component.
1427 if (auto structTy = resultTy.dyn_cast<mlir::LLVM::LLVMStructType>()) {
1428 std::int64_t cstIndex = getConstantIntValue(componentIndex);
1429 resultTy = structTy.getBody()[cstIndex];
1430 } else if (auto arrayTy =
1431 resultTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
1432 resultTy = arrayTy.getElementType();
1433 } else {
1434 fir::emitFatalError(loc, "corrupted component GEP generated being "
1435 "generated in fir.embox/fir.rebox");
1436 }
1437 gepArgs.push_back(componentIndex);
1438 }
1439 if (substringOffset) {
1440 if (auto arrayTy = resultTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
1441 gepArgs.push_back(*substringOffset);
1442 resultTy = arrayTy.getElementType();
1443 } else {
1444 // If the CHARACTER length is dynamic, the whole base type should have
1445 // degenerated to an llvm.ptr<i[width]>, and there should not be any
1446 // cstInteriorIndices/componentIndices. The substring offset can be
1447       // added to the outerOffset since it applies to the same LLVM type.
1448 if (gepArgs.size() != 1)
1449 fir::emitFatalError(loc,
1450 "corrupted substring GEP in fir.embox/fir.rebox");
1451         mlir::Type outerOffsetTy = gepArgs[0].getType();
1452         mlir::Value cast =
1453             this->integerCast(loc, rewriter, outerOffsetTy, *substringOffset);
1454
1455         gepArgs[0] = rewriter.create<mlir::LLVM::AddOp>(loc, outerOffsetTy,
1456                                                         gepArgs[0], cast);
1457 }
1458 }
1459 resultTy = mlir::LLVM::LLVMPointerType::get(resultTy);
1460 return rewriter.create<mlir::LLVM::GEPOp>(loc, resultTy, base, gepArgs);
1461 }
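  // Illustrative note (assuming fir.array<2x3xf32> lowers to the LLVM type
  // !llvm.array<3 x array<2 x f32>>): constant interior indices collected in
  // Fortran column-major order (i, j) are pushed above in reverse, so gepArgs
  // becomes [outerOffset, j, i] and the GEP walks the row-major LLVM array
  // correctly.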
1462
1463 template <typename BOX>
1464 void
1465   getSubcomponentIndices(BOX xbox, mlir::Value memref,
1466 mlir::ValueRange operands,
1467 mlir::SmallVectorImpl<mlir::Value> &indices) const {
1468 // For each field in the path add the offset to base via the args list.
1469 // In the most general case, some offsets must be computed since
1470   // they are not known until runtime.
1471 if (fir::hasDynamicSize(fir::unwrapSequenceType(
1472 fir::unwrapPassByRefType(memref.getType()))))
1473 TODO(xbox.getLoc(),
1474 "fir.embox codegen dynamic size component in derived type");
1475 indices.append(operands.begin() + xbox.subcomponentOffset(),
1476 operands.begin() + xbox.subcomponentOffset() +
1477 xbox.subcomponent().size());
1478 }
1479
1480 /// If the embox is not in a globalOp body, allocate storage for the box;
1481 /// store the value inside and return the generated alloca. Return the input
1482 /// value otherwise.
1483 mlir::Value
1484   placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter,
1485 mlir::Location loc, mlir::Value boxValue) const {
1486 auto *thisBlock = rewriter.getInsertionBlock();
1487 if (thisBlock && mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp()))
1488 return boxValue;
1489 auto boxPtrTy = mlir::LLVM::LLVMPointerType::get(boxValue.getType());
1490 auto alloca = genAllocaWithType(loc, boxPtrTy, defaultAlign, rewriter);
1491 rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, alloca);
1492 return alloca;
1493 }
1494 };
1495
1496 /// Compute the extent of a triplet slice (lb:ub:step).
1497 static mlir::Value
1498 computeTripletExtent(mlir::ConversionPatternRewriter &rewriter,
1499 mlir::Location loc, mlir::Value lb, mlir::Value ub,
1500 mlir::Value step, mlir::Value zero, mlir::Type type) {
1501 mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb);
1502 extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step);
1503 extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step);
1504 // If the resulting extent is negative (`ub-lb` and `step` have different
1505 // signs), zero must be returned instead.
1506 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
1507 loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero);
1508 return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero);
1509 }
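// Worked example (for illustration): the triplet 2:10:3 yields
// extent = (10 - 2 + 3) / 3 = 3, i.e. elements 2, 5, and 8. For a triplet
// such as 10:2:3 the quotient is negative, so the select above clamps the
// extent to zero.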
1510
1511 /// Create a generic box on a memory reference. This conversion lowers the
1512 /// abstract box to the appropriate, initialized descriptor.
1513 struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> {
1514 using EmboxCommonConversion::EmboxCommonConversion;
1515
1516 mlir::LogicalResult
1517   matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor,
1518 mlir::ConversionPatternRewriter &rewriter) const override {
1519 mlir::ValueRange operands = adaptor.getOperands();
1520 assert(!embox.getShape() && "There should be no dims on this embox op");
1521 auto [boxTy, dest, eleSize] = consDescriptorPrefix(
1522 embox, rewriter, /*rank=*/0, /*lenParams=*/operands.drop_front(1));
1523 dest = insertBaseAddress(rewriter, embox.getLoc(), dest, operands[0]);
1524 if (isDerivedTypeWithLenParams(boxTy)) {
1525 TODO(embox.getLoc(),
1526 "fir.embox codegen of derived with length parameters");
1527 return mlir::failure();
1528 }
1529 auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest);
1530 rewriter.replaceOp(embox, result);
1531 return mlir::success();
1532 }
1533 };
1534
1535 /// Create a generic box on a memory reference.
1536 struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> {
1537 using EmboxCommonConversion::EmboxCommonConversion;
1538
1539 mlir::LogicalResult
1540   matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor,
1541 mlir::ConversionPatternRewriter &rewriter) const override {
1542 mlir::ValueRange operands = adaptor.getOperands();
1543 auto [boxTy, dest, eleSize] =
1544 consDescriptorPrefix(xbox, rewriter, xbox.getOutRank(),
1545 operands.drop_front(xbox.lenParamOffset()));
1546 // Generate the triples in the dims field of the descriptor
1547 auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64);
1548 mlir::Value base = operands[0];
1549 assert(!xbox.shape().empty() && "must have a shape");
1550 unsigned shapeOffset = xbox.shapeOffset();
1551 bool hasShift = !xbox.shift().empty();
1552 unsigned shiftOffset = xbox.shiftOffset();
1553 bool hasSlice = !xbox.slice().empty();
1554 unsigned sliceOffset = xbox.sliceOffset();
1555 mlir::Location loc = xbox.getLoc();
1556 mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0);
1557 mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1);
1558 mlir::Value prevPtrOff = one;
1559 mlir::Type eleTy = boxTy.getEleTy();
1560 const unsigned rank = xbox.getRank();
1561 llvm::SmallVector<mlir::Value> cstInteriorIndices;
1562 unsigned constRows = 0;
1563 mlir::Value ptrOffset = zero;
1564 mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType());
1565 assert(memEleTy.isa<fir::SequenceType>());
1566 auto seqTy = memEleTy.cast<fir::SequenceType>();
1567 mlir::Type seqEleTy = seqTy.getEleTy();
1568 // Adjust the element scaling factor if the element is a dependent type.
1569 if (fir::hasDynamicSize(seqEleTy)) {
1570 if (auto charTy = seqEleTy.dyn_cast<fir::CharacterType>()) {
1571 assert(xbox.lenParams().size() == 1);
1572 mlir::LLVM::ConstantOp charSize = genConstantIndex(
1573 loc, i64Ty, rewriter, lowerTy().characterBitsize(charTy) / 8);
1574 mlir::Value castedLen =
1575 integerCast(loc, rewriter, i64Ty, operands[xbox.lenParamOffset()]);
1576 auto byteOffset =
1577 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, charSize, castedLen);
1578 prevPtrOff = integerCast(loc, rewriter, i64Ty, byteOffset);
1579 } else if (seqEleTy.isa<fir::RecordType>()) {
1580 // prevPtrOff = ;
1581 TODO(loc, "generate call to calculate size of PDT");
1582 } else {
1583 fir::emitFatalError(loc, "unexpected dynamic type");
1584 }
1585 } else {
1586 constRows = seqTy.getConstantRows();
1587 }
1588
1589 const auto hasSubcomp = !xbox.subcomponent().empty();
1590 const bool hasSubstr = !xbox.substr().empty();
1591     // Compute the initial element stride that will be used to compute the
1592     // step in each dimension.
1593 mlir::Value prevDimByteStride = integerCast(loc, rewriter, i64Ty, eleSize);
1594 if (hasSubcomp) {
1595 // We have a subcomponent. The step value needs to be the number of
1596 // bytes per element (which is a derived type).
1597 auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy));
1598 prevDimByteStride = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter);
1599 } else if (hasSubstr) {
1600 // We have a substring. The step value needs to be the number of bytes
1601 // per CHARACTER element.
1602 auto charTy = seqEleTy.cast<fir::CharacterType>();
1603 if (fir::hasDynamicSize(charTy)) {
1604 prevDimByteStride = prevPtrOff;
1605 } else {
1606 prevDimByteStride = genConstantIndex(
1607 loc, i64Ty, rewriter,
1608 charTy.getLen() * lowerTy().characterBitsize(charTy) / 8);
1609 }
1610 }
1611
1612 // Process the array subspace arguments (shape, shift, etc.), if any,
1613 // translating everything to values in the descriptor wherever the entity
1614 // has a dynamic array dimension.
1615 for (unsigned di = 0, descIdx = 0; di < rank; ++di) {
1616 mlir::Value extent = operands[shapeOffset];
1617 mlir::Value outerExtent = extent;
1618 bool skipNext = false;
1619 if (hasSlice) {
1620 mlir::Value off = operands[sliceOffset];
1621 mlir::Value adj = one;
1622 if (hasShift)
1623 adj = operands[shiftOffset];
1624 auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj);
1625 if (constRows > 0) {
1626 cstInteriorIndices.push_back(ao);
1627 } else {
1628 auto dimOff =
1629 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff);
1630 ptrOffset =
1631 rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset);
1632 }
1633 if (mlir::isa_and_nonnull<fir::UndefOp>(
1634 xbox.slice()[3 * di + 1].getDefiningOp())) {
1635 // This dimension contains a scalar expression in the array slice op.
1636 // The dimension is loop invariant, will be dropped, and will not
1637 // appear in the descriptor.
1638 skipNext = true;
1639 }
1640 }
1641 if (!skipNext) {
1642 // store extent
1643 if (hasSlice)
1644 extent = computeTripletExtent(rewriter, loc, operands[sliceOffset],
1645 operands[sliceOffset + 1],
1646 operands[sliceOffset + 2], zero, i64Ty);
1647 // Lower bound is normalized to 0 for BIND(C) interoperability.
1648 mlir::Value lb = zero;
1649 const bool isaPointerOrAllocatable =
1650 eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>();
1651         // Lower bound defaults to 1 for POINTER, ALLOCATABLE, and
1652 // denormalized descriptors.
1653 if (isaPointerOrAllocatable || !normalizedLowerBound(xbox))
1654 lb = one;
1655 // If there is a shifted origin, and no fir.slice, and this is not
1656 // a normalized descriptor then use the value from the shift op as
1657 // the lower bound.
1658 if (hasShift && !(hasSlice || hasSubcomp || hasSubstr) &&
1659 (isaPointerOrAllocatable || !normalizedLowerBound(xbox))) {
1660 lb = operands[shiftOffset];
1661 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>(
1662 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero);
1663 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one,
1664 lb);
1665 }
1666 dest = insertLowerBound(rewriter, loc, dest, descIdx, lb);
1667
1668 dest = insertExtent(rewriter, loc, dest, descIdx, extent);
1669
1670 // store step (scaled by shaped extent)
1671 mlir::Value step = prevDimByteStride;
1672 if (hasSlice)
1673 step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step,
1674 operands[sliceOffset + 2]);
1675 dest = insertStride(rewriter, loc, dest, descIdx, step);
1676 ++descIdx;
1677 }
1678
1679 // compute the stride and offset for the next natural dimension
1680 prevDimByteStride = rewriter.create<mlir::LLVM::MulOp>(
1681 loc, i64Ty, prevDimByteStride, outerExtent);
1682 if (constRows == 0)
1683 prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff,
1684 outerExtent);
1685 else
1686 --constRows;
1687
1688 // increment iterators
1689 ++shapeOffset;
1690 if (hasShift)
1691 ++shiftOffset;
1692 if (hasSlice)
1693 sliceOffset += 3;
1694 }
1695 if (hasSlice || hasSubcomp || hasSubstr) {
1696 // Shift the base address.
1697 llvm::SmallVector<mlir::Value> fieldIndices;
1698 llvm::Optional<mlir::Value> substringOffset;
1699 if (hasSubcomp)
1700 getSubcomponentIndices(xbox, xbox.memref(), operands, fieldIndices);
1701 if (hasSubstr)
1702 substringOffset = operands[xbox.substrOffset()];
1703 base = genBoxOffsetGep(rewriter, loc, base, ptrOffset, cstInteriorIndices,
1704 fieldIndices, substringOffset);
1705 }
1706 dest = insertBaseAddress(rewriter, loc, dest, base);
1707 if (isDerivedTypeWithLenParams(boxTy))
1708 TODO(loc, "fir.embox codegen of derived with length parameters");
1709
1710 mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest);
1711 rewriter.replaceOp(xbox, result);
1712 return mlir::success();
1713 }
1714
1715 /// Return true if `xbox` has a normalized lower bounds attribute. A box value
1716 /// that is neither a POINTER nor an ALLOCATABLE should be normalized to a
1717 /// zero origin lower bound for interoperability with BIND(C).
1718   inline static bool normalizedLowerBound(fir::cg::XEmboxOp xbox) {
1719 return xbox->hasAttr(fir::getNormalizedLowerBoundAttrName());
1720 }
1721 };
1722
1723 /// Create a new box given a box reference.
1724 struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> {
1725 using EmboxCommonConversion::EmboxCommonConversion;
1726
1727 mlir::LogicalResult
1728   matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor,
1729 mlir::ConversionPatternRewriter &rewriter) const override {
1730 mlir::Location loc = rebox.getLoc();
1731 mlir::Type idxTy = lowerTy().indexType();
1732 mlir::Value loweredBox = adaptor.getOperands()[0];
1733 mlir::ValueRange operands = adaptor.getOperands();
1734
1735 // Create new descriptor and fill its non-shape related data.
1736 llvm::SmallVector<mlir::Value, 2> lenParams;
1737 mlir::Type inputEleTy = getInputEleTy(rebox);
1738 if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) {
1739 mlir::Value len =
1740 loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter);
1741 if (charTy.getFKind() != 1) {
1742 mlir::Value width =
1743 genConstantIndex(loc, idxTy, rewriter, charTy.getFKind());
1744 len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width);
1745 }
1746 lenParams.emplace_back(len);
1747 } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) {
1748 if (recTy.getNumLenParams() != 0)
1749 TODO(loc, "reboxing descriptor of derived type with length parameters");
1750 }
1751 auto [boxTy, dest, eleSize] =
1752 consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams);
1753
1754 // Read input extents, strides, and base address
1755 llvm::SmallVector<mlir::Value> inputExtents;
1756 llvm::SmallVector<mlir::Value> inputStrides;
1757 const unsigned inputRank = rebox.getRank();
1758 for (unsigned i = 0; i < inputRank; ++i) {
1759 mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i);
1760 llvm::SmallVector<mlir::Value, 3> dimInfo =
1761 getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter);
1762 inputExtents.emplace_back(dimInfo[1]);
1763 inputStrides.emplace_back(dimInfo[2]);
1764 }
1765
1766 mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType());
1767 mlir::Value baseAddr =
1768 loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter);
1769
1770 if (!rebox.slice().empty() || !rebox.subcomponent().empty())
1771 return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides,
1772 operands, rewriter);
1773 return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides,
1774 operands, rewriter);
1775 }
1776
1777 private:
1778 /// Write resulting shape and base address in descriptor, and replace rebox
1779 /// op.
1780 mlir::LogicalResult
1781   finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base,
1782 mlir::ValueRange lbounds, mlir::ValueRange extents,
1783 mlir::ValueRange strides,
1784 mlir::ConversionPatternRewriter &rewriter) const {
1785 mlir::Location loc = rebox.getLoc();
1786 mlir::Value zero =
1787 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
1788 mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1);
1789 for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) {
1790 mlir::Value extent = std::get<0>(iter.value());
1791 unsigned dim = iter.index();
1792 mlir::Value lb = one;
1793 if (!lbounds.empty()) {
1794 lb = lbounds[dim];
1795 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>(
1796 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero);
1797 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, lb);
1798       }
1799 dest = insertLowerBound(rewriter, loc, dest, dim, lb);
1800 dest = insertExtent(rewriter, loc, dest, dim, extent);
1801 dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value()));
1802 }
1803 dest = insertBaseAddress(rewriter, loc, dest, base);
1804 mlir::Value result =
1805 placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest);
1806 rewriter.replaceOp(rebox, result);
1807 return mlir::success();
1808 }
1809
1810 // Apply slice given the base address, extents and strides of the input box.
1811 mlir::LogicalResult
1812   sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base,
1813 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides,
1814 mlir::ValueRange operands,
1815 mlir::ConversionPatternRewriter &rewriter) const {
1816 mlir::Location loc = rebox.getLoc();
1817 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext());
1818 mlir::Type idxTy = lowerTy().indexType();
1819 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
1820 // Apply subcomponent and substring shift on base address.
1821 if (!rebox.subcomponent().empty() || !rebox.substr().empty()) {
1822 // Cast to inputEleTy* so that a GEP can be used.
1823 mlir::Type inputEleTy = getInputEleTy(rebox);
1824 auto llvmElePtrTy =
1825 mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy));
1826 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base);
1827
1828 llvm::SmallVector<mlir::Value> fieldIndices;
1829 llvm::Optional<mlir::Value> substringOffset;
1830 if (!rebox.subcomponent().empty())
1831 getSubcomponentIndices(rebox, rebox.box(), operands, fieldIndices);
1832 if (!rebox.substr().empty())
1833 substringOffset = operands[rebox.substrOffset()];
1834 base = genBoxOffsetGep(rewriter, loc, base, zero,
1835 /*cstInteriorIndices=*/llvm::None, fieldIndices,
1836 substringOffset);
1837 }
1838
1839 if (rebox.slice().empty())
1840 // The array section is of the form array[%component][substring], keep
1841 // the input array extents and strides.
1842 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None,
1843 inputExtents, inputStrides, rewriter);
1844
1845 // Strides from the fir.box are in bytes.
1846 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
1847
1848 // The slice is of the form array(i:j:k)[%component]. Compute new extents
1849 // and strides.
1850 llvm::SmallVector<mlir::Value> slicedExtents;
1851 llvm::SmallVector<mlir::Value> slicedStrides;
1852 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
1853 const bool sliceHasOrigins = !rebox.shift().empty();
1854 unsigned sliceOps = rebox.sliceOffset();
1855 unsigned shiftOps = rebox.shiftOffset();
1856 auto strideOps = inputStrides.begin();
1857 const unsigned inputRank = inputStrides.size();
1858 for (unsigned i = 0; i < inputRank;
1859 ++i, ++strideOps, ++shiftOps, sliceOps += 3) {
1860 mlir::Value sliceLb =
1861 integerCast(loc, rewriter, idxTy, operands[sliceOps]);
1862 mlir::Value inputStride = *strideOps; // already idxTy
1863 // Apply origin shift: base += (lb-shift)*input_stride
1864 mlir::Value sliceOrigin =
1865 sliceHasOrigins
1866 ? integerCast(loc, rewriter, idxTy, operands[shiftOps])
1867 : one;
1868 mlir::Value diff =
1869 rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin);
1870 mlir::Value offset =
1871 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride);
1872 base = genGEP(loc, voidPtrTy, rewriter, base, offset);
1873 // Apply upper bound and step if this is a triplet. Otherwise, the
1874 // dimension is dropped and no extents/strides are computed.
1875 mlir::Value upper = operands[sliceOps + 1];
1876 const bool isTripletSlice =
1877 !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp());
1878 if (isTripletSlice) {
1879 mlir::Value step =
1880 integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]);
1881         // extent = (ub - lb + step) / step
1882 mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper);
1883 mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb,
1884 sliceUb, step, zero, idxTy);
1885 slicedExtents.emplace_back(extent);
1886 // stride = step*input_stride
1887 mlir::Value stride =
1888 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride);
1889 slicedStrides.emplace_back(stride);
1890 }
1891 }
1892 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None,
1893 slicedExtents, slicedStrides, rewriter);
1894 }
1895
1896 /// Apply a new shape to the data described by a box given the base address,
1897 /// extents and strides of the box.
1898 mlir::LogicalResult
1899   reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base,
1900 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides,
1901 mlir::ValueRange operands,
1902 mlir::ConversionPatternRewriter &rewriter) const {
1903 mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(),
1904 operands.begin() + rebox.shiftOffset() +
1905 rebox.shift().size()};
1906 if (rebox.shape().empty()) {
1907 // Only setting new lower bounds.
1908 return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents,
1909 inputStrides, rewriter);
1910 }
1911
1912 mlir::Location loc = rebox.getLoc();
1913 // Strides from the fir.box are in bytes.
1914 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext());
1915 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
1916
1917 llvm::SmallVector<mlir::Value> newStrides;
1918 llvm::SmallVector<mlir::Value> newExtents;
1919 mlir::Type idxTy = lowerTy().indexType();
1920 // First stride from input box is kept. The rest is assumed contiguous
1921 // (it is not possible to reshape otherwise). If the input is scalar,
1922 // which may be OK if all new extents are ones, the stride does not
1923 // matter, use one.
1924 mlir::Value stride = inputStrides.empty()
1925 ? genConstantIndex(loc, idxTy, rewriter, 1)
1926 : inputStrides[0];
1927 for (unsigned i = 0; i < rebox.shape().size(); ++i) {
1928 mlir::Value rawExtent = operands[rebox.shapeOffset() + i];
1929 mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent);
1930 newExtents.emplace_back(extent);
1931 newStrides.emplace_back(stride);
1932 // nextStride = extent * stride;
1933 stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride);
1934 }
1935 return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides,
1936 rewriter);
1937 }
1938
1939 /// Return scalar element type of the input box.
1940   static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) {
1941 auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType());
1942 if (auto seqTy = ty.dyn_cast<fir::SequenceType>())
1943 return seqTy.getEleTy();
1944 return ty;
1945 }
1946 };
1947
1948 /// Lower `fir.emboxproc` operation. Creates a procedure box.
1949 /// TODO: Part of supporting Fortran 2003 procedure pointers.
1950 struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> {
1951 using FIROpConversion::FIROpConversion;
1952
1953 mlir::LogicalResult
1954   matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor,
1955 mlir::ConversionPatternRewriter &rewriter) const override {
1956 TODO(emboxproc.getLoc(), "fir.emboxproc codegen");
1957 return mlir::failure();
1958 }
1959 };
1960
1961 // Code shared between insert_value and extract_value Ops.
1962 struct ValueOpCommon {
1963 // Translate the arguments pertaining to any multidimensional array to
1964 // row-major order for LLVM-IR.
1965   static void toRowMajor(llvm::SmallVectorImpl<mlir::Attribute> &attrs,
1966 mlir::Type ty) {
1967 assert(ty && "type is null");
1968 const auto end = attrs.size();
1969 for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) {
1970 if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
1971 const auto dim = getDimension(seq);
1972 if (dim > 1) {
1973 auto ub = std::min(i + dim, end);
1974 std::reverse(attrs.begin() + i, attrs.begin() + ub);
1975 i += dim - 1;
1976 }
1977 ty = getArrayElementType(seq);
1978 } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) {
1979 ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()];
1980 } else {
1981 llvm_unreachable("index into invalid type");
1982 }
1983 }
1984 }
1985
1986 static llvm::SmallVector<mlir::Attribute>
1987   collectIndices(mlir::ConversionPatternRewriter &rewriter,
1988 mlir::ArrayAttr arrAttr) {
1989 llvm::SmallVector<mlir::Attribute> attrs;
1990 for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) {
1991 if (i->isa<mlir::IntegerAttr>()) {
1992 attrs.push_back(*i);
1993 } else {
1994 auto fieldName = i->cast<mlir::StringAttr>().getValue();
1995 ++i;
1996 auto ty = i->cast<mlir::TypeAttr>().getValue();
1997 auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName);
1998 attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index));
1999 }
2000 }
2001 return attrs;
2002 }
2003
2004 private:
2005   static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) {
2006 auto eleTy = ty.getElementType();
2007 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>())
2008 eleTy = arrTy.getElementType();
2009 return eleTy;
2010 }
2011 };
2012
2013 namespace {
2014 /// Extract a subobject value from an ssa-value of aggregate type
2015 struct ExtractValueOpConversion
2016 : public FIROpAndTypeConversion<fir::ExtractValueOp>,
2017 public ValueOpCommon {
2018 using FIROpAndTypeConversion::FIROpAndTypeConversion;
2019
2020 mlir::LogicalResult
2021   doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor,
2022 mlir::ConversionPatternRewriter &rewriter) const override {
2023 mlir::ValueRange operands = adaptor.getOperands();
2024 auto attrs = collectIndices(rewriter, extractVal.getCoor());
2025 toRowMajor(attrs, operands[0].getType());
2026 auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs);
2027 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(
2028 extractVal, ty, operands[0], position);
2029 return mlir::success();
2030 }
2031 };
2032
2033 /// InsertValue is the generalized instruction for the composition of new
2034 /// aggregate type values.
2035 struct InsertValueOpConversion
2036 : public FIROpAndTypeConversion<fir::InsertValueOp>,
2037 public ValueOpCommon {
2038 using FIROpAndTypeConversion::FIROpAndTypeConversion;
2039
2040 mlir::LogicalResult
2041   doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor,
2042 mlir::ConversionPatternRewriter &rewriter) const override {
2043 mlir::ValueRange operands = adaptor.getOperands();
2044 auto attrs = collectIndices(rewriter, insertVal.getCoor());
2045 toRowMajor(attrs, operands[0].getType());
2046 auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs);
2047 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
2048 insertVal, ty, operands[0], operands[1], position);
2049 return mlir::success();
2050 }
2051 };
2052
2053 /// InsertOnRange inserts a value into a sequence over a range of offsets.
2054 struct InsertOnRangeOpConversion
2055 : public FIROpAndTypeConversion<fir::InsertOnRangeOp> {
2056 using FIROpAndTypeConversion::FIROpAndTypeConversion;
2057
2058   // Increments an array of subscripts in a row-major fashion.
2059   void incrementSubscripts(const llvm::SmallVector<uint64_t> &dims,
2060 llvm::SmallVector<uint64_t> &subscripts) const {
2061 for (size_t i = dims.size(); i > 0; --i) {
2062 if (++subscripts[i - 1] < dims[i - 1]) {
2063 return;
2064 }
2065 subscripts[i - 1] = 0;
2066 }
2067 }
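  // For example (illustrative): with dims = {2, 3}, the subscripts advance
  // {0,0} -> {0,1} -> {0,2} -> {1,0} -> {1,1} -> {1,2}; the last dimension
  // varies fastest, matching the row-major LLVM aggregate layout.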
2068
2069 mlir::LogicalResult
2070   doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor,
2071 mlir::ConversionPatternRewriter &rewriter) const override {
2072
2073 llvm::SmallVector<uint64_t> dims;
2074 auto type = adaptor.getOperands()[0].getType();
2075
2076 // Iteratively extract the array dimensions from the type.
2077 while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
2078 dims.push_back(t.getNumElements());
2079 type = t.getElementType();
2080 }
2081
2082 llvm::SmallVector<std::uint64_t> lBounds;
2083 llvm::SmallVector<std::uint64_t> uBounds;
2084
2085 // Unzip the upper and lower bound and convert to a row major format.
2086 mlir::DenseIntElementsAttr coor = range.getCoor();
2087 auto reversedCoor = llvm::reverse(coor.getValues<int64_t>());
2088 for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) {
2089 uBounds.push_back(*i++);
2090 lBounds.push_back(*i);
2091 }
2092
2093 auto &subscripts = lBounds;
2094 auto loc = range.getLoc();
2095 mlir::Value lastOp = adaptor.getOperands()[0];
2096 mlir::Value insertVal = adaptor.getOperands()[1];
2097
2098 auto i64Ty = rewriter.getI64Type();
2099 while (subscripts != uBounds) {
2100 // Convert uint64_t's to Attribute's.
2101 llvm::SmallVector<mlir::Attribute> subscriptAttrs;
2102 for (const auto &subscript : subscripts)
2103 subscriptAttrs.push_back(mlir::IntegerAttr::get(i64Ty, subscript));
2104 lastOp = rewriter.create<mlir::LLVM::InsertValueOp>(
2105 loc, ty, lastOp, insertVal,
2106 mlir::ArrayAttr::get(range.getContext(), subscriptAttrs));
2107
2108 incrementSubscripts(dims, subscripts);
2109 }
2110
2111 // Convert uint64_t's to Attribute's.
2112 llvm::SmallVector<mlir::Attribute> subscriptAttrs;
2113 for (const auto &subscript : subscripts)
2114 subscriptAttrs.push_back(
2115 mlir::IntegerAttr::get(rewriter.getI64Type(), subscript));
2116 mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs);
2117
2118 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
2119 range, ty, lastOp, insertVal,
2120 mlir::ArrayAttr::get(range.getContext(), arrayRef));
2121
2122 return mlir::success();
2123 }
2124 };
2125 } // namespace
2126
2127 namespace {
2128 /// XArrayCoor is the address arithmetic on a dynamically shaped, sliced,
2129 /// shifted etc. array.
2130 /// (See the static restriction on coordinate_of.) array_coor determines the
2131 /// coordinate (location) of a specific element.
2132 struct XArrayCoorOpConversion
2133 : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> {
2134 using FIROpAndTypeConversion::FIROpAndTypeConversion;
2135
2136 mlir::LogicalResult
2137   doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor,
2138 mlir::ConversionPatternRewriter &rewriter) const override {
2139 auto loc = coor.getLoc();
2140 mlir::ValueRange operands = adaptor.getOperands();
2141 unsigned rank = coor.getRank();
2142 assert(coor.indices().size() == rank);
2143 assert(coor.shape().empty() || coor.shape().size() == rank);
2144 assert(coor.shift().empty() || coor.shift().size() == rank);
2145 assert(coor.slice().empty() || coor.slice().size() == 3 * rank);
2146 mlir::Type idxTy = lowerTy().indexType();
2147 unsigned indexOffset = coor.indicesOffset();
2148 unsigned shapeOffset = coor.shapeOffset();
2149 unsigned shiftOffset = coor.shiftOffset();
2150 unsigned sliceOffset = coor.sliceOffset();
2151 auto sliceOps = coor.slice().begin();
2152 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
2153 mlir::Value prevExt = one;
2154 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
2155 mlir::Value offset = zero;
2156 const bool isShifted = !coor.shift().empty();
2157 const bool isSliced = !coor.slice().empty();
2158 const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>();
2159
2160 // For each dimension of the array, generate the offset calculation.
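    // In the simplest case (no slice, no shift, unboxed base), the loop below
    // computes the classic column-major element offset
    //   (idx1 - lb1) + (idx2 - lb2)*e1 + (idx3 - lb3)*e1*e2 + ...
    // in `offset`, with `prevExt` carrying the running extent product
    // (illustrative summary).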
2161 for (unsigned i = 0; i < rank; ++i, ++indexOffset, ++shapeOffset,
2162 ++shiftOffset, sliceOffset += 3, sliceOps += 3) {
2163 mlir::Value index =
2164 integerCast(loc, rewriter, idxTy, operands[indexOffset]);
2165 mlir::Value lb =
2166 isShifted ? integerCast(loc, rewriter, idxTy, operands[shiftOffset])
2167 : one;
2168 mlir::Value step = one;
2169 bool normalSlice = isSliced;
2170 // Compute zero based index in dimension i of the element, applying
2171 // potential triplets and lower bounds.
2172 if (isSliced) {
2173 mlir::Value originalUb = *(sliceOps + 1);
2174 normalSlice =
2175 !mlir::isa_and_nonnull<fir::UndefOp>(originalUb.getDefiningOp());
2176 if (normalSlice)
2177 step = integerCast(loc, rewriter, idxTy, operands[sliceOffset + 2]);
2178 }
2179 auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb);
2180 mlir::Value diff =
2181 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step);
2182 if (normalSlice) {
2183 mlir::Value sliceLb =
2184 integerCast(loc, rewriter, idxTy, operands[sliceOffset]);
2185 auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb);
2186 diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj);
2187 }
2188 // Update the offset given the stride and the zero based index `diff`
2189 // that was just computed.
2190 if (baseIsBoxed) {
2191 // Use stride in bytes from the descriptor.
2192 mlir::Value stride = loadStrideFromBox(loc, operands[0], i, rewriter);
2193 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride);
2194 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset);
2195 } else {
2196 // Use stride computed at last iteration.
2197 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt);
2198 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset);
2199 // Compute next stride assuming contiguity of the base array
2200 // (in element number).
2201 auto nextExt = integerCast(loc, rewriter, idxTy, operands[shapeOffset]);
2202 prevExt =
2203 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt);
2204 }
2205 }
2206
2207 // Add computed offset to the base address.
2208 if (baseIsBoxed) {
2209       // Working with byte offsets. The base address is read from the fir.box
2210       // and needs to be cast to i8* to do the pointer arithmetic.
2211 mlir::Type baseTy = getBaseAddrTypeFromBox(operands[0].getType());
2212 mlir::Value base =
2213 loadBaseAddrFromBox(loc, baseTy, operands[0], rewriter);
2214 mlir::Type voidPtrTy = getVoidPtrType();
2215 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
2216 llvm::SmallVector<mlir::Value> args{offset};
2217 auto addr =
2218 rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args);
2219 if (coor.subcomponent().empty()) {
2220 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, addr);
2221 return mlir::success();
2222 }
2223 auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr);
2224 args.clear();
2225 args.push_back(zero);
2226 if (!coor.lenParams().empty()) {
2227 // If type parameters are present, then we don't want to use a GEPOp
2228 // as below, as the LLVM struct type cannot be statically defined.
2229 TODO(loc, "derived type with type parameters");
2230 }
2231 // TODO: array offset subcomponents must be converted to LLVM's
2232 // row-major layout here.
2233 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
2234 args.push_back(operands[i]);
2235 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, casted, args);
2236 return mlir::success();
2237 }
2238
2239 // The array was not boxed, so it must be contiguous. offset is therefore an
2240 // element offset and the base type is kept in the GEP unless the element
2241 // type size is itself dynamic.
2242 mlir::Value base;
2243 if (coor.subcomponent().empty()) {
2244 // No subcomponent.
2245 if (!coor.lenParams().empty()) {
2246 // Type parameters. Adjust element size explicitly.
2247 auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType());
2248 assert(eleTy && "result must be a reference-like type");
2249 if (fir::characterWithDynamicLen(eleTy)) {
2250 assert(coor.lenParams().size() == 1);
2251 auto length = integerCast(loc, rewriter, idxTy,
2252 operands[coor.lenParamsOffset()]);
2253 offset =
2254 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, length);
2255 } else {
2256 TODO(loc, "compute size of derived type with type parameters");
2257 }
2258 }
2259 // Cast the base address to a pointer to T.
2260 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty, operands[0]);
2261 } else {
2262 // Operand #0 must have a pointer type. For subcomponent slicing, we
2263 // want to cast away the array type and have a plain struct type.
2264 mlir::Type ty0 = operands[0].getType();
2265 auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>();
2266 assert(ptrTy && "expected pointer type");
2267 mlir::Type eleTy = ptrTy.getElementType();
2268 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>())
2269 eleTy = arrTy.getElementType();
2270 auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy);
2271 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy, operands[0]);
2272 }
2273 llvm::SmallVector<mlir::Value> args = {offset};
2274 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
2275 args.push_back(operands[i]);
2276 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args);
2277 return mlir::success();
2278 }
2279 };
2280 } // namespace
2281
2282 /// Convert a (memory) reference into a reference to a subobject.
2283 /// The coordinate_of op is a Swiss army knife operation that can be used on
2284 /// (memory) references to records, arrays, complex, etc. as well as boxes.
2285 /// With unboxed arrays, there is the restriction that the array have a static
2286 /// shape in all but the last column.
2287 struct CoordinateOpConversion
2288 : public FIROpAndTypeConversion<fir::CoordinateOp> {
2289 using FIROpAndTypeConversion::FIROpAndTypeConversion;
2290
2291 mlir::LogicalResult
2292   doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor,
2293 mlir::ConversionPatternRewriter &rewriter) const override {
2294 mlir::ValueRange operands = adaptor.getOperands();
2295
2296 mlir::Location loc = coor.getLoc();
2297 mlir::Value base = operands[0];
2298 mlir::Type baseObjectTy = coor.getBaseType();
2299 mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy);
2300 assert(objectTy && "fir.coordinate_of expects a reference type");
2301
2302 // Complex type - basically, extract the real or imaginary part
2303 if (fir::isa_complex(objectTy)) {
2304 mlir::LLVM::ConstantOp c0 =
2305 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
2306 llvm::SmallVector<mlir::Value> offs = {c0, operands[1]};
2307 mlir::Value gep = genGEP(loc, ty, rewriter, base, offs);
2308 rewriter.replaceOp(coor, gep);
2309 return mlir::success();
2310 }
2311
2312 // Boxed type - get the base pointer from the box
2313 if (baseObjectTy.dyn_cast<fir::BoxType>())
2314 return doRewriteBox(coor, ty, operands, loc, rewriter);
2315
2316 // Reference, pointer or a heap type
2317 if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType, fir::HeapType>())
2318 return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter);
2319
2320 return rewriter.notifyMatchFailure(
2321 coor, "fir.coordinate_of base operand has unsupported type");
2322 }
2323
2324   static unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) {
2325 return fir::hasDynamicSize(ty)
2326 ? op.getDefiningOp()
2327 ->getAttrOfType<mlir::IntegerAttr>("field")
2328 .getInt()
2329 : getConstantIntValue(op);
2330 }
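  // Note (cross-reference): when the record has a dynamic size, the coordinate
  // operand is expected to come from a lowered fir.field_index, which carries
  // the "field" integer attribute (see FieldIndexOpConversion below).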
2331
2332   static bool hasSubDimensions(mlir::Type type) {
2333 return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>();
2334 }
2335
2336 /// Check whether this form of `!fir.coordinate_of` is supported. These
2337 /// additional checks are required, because we are not yet able to convert
2338 /// all valid forms of `!fir.coordinate_of`.
2339 /// TODO: Either implement the unsupported cases or extend the verifier
2340 /// in FIROps.cpp instead.
2341   static bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) {
2342 const std::size_t numOfCoors = coors.size();
2343 std::size_t i = 0;
2344 bool subEle = false;
2345 bool ptrEle = false;
2346 for (; i < numOfCoors; ++i) {
2347 mlir::Value nxtOpnd = coors[i];
2348 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
2349 subEle = true;
2350 i += arrTy.getDimension() - 1;
2351 type = arrTy.getEleTy();
2352 } else if (auto recTy = type.dyn_cast<fir::RecordType>()) {
2353 subEle = true;
2354 type = recTy.getType(getFieldNumber(recTy, nxtOpnd));
2355 } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) {
2356 subEle = true;
2357 type = tupTy.getType(getConstantIntValue(nxtOpnd));
2358 } else {
2359 ptrEle = true;
2360 }
2361 }
2362 if (ptrEle)
2363 return (!subEle) && (numOfCoors == 1);
2364 return subEle && (i >= numOfCoors);
2365 }
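  // For instance (illustrative): indexing through arrays, records, or tuples
  // sets `subEle` and is accepted as long as every coordinate is consumed,
  // while a coordinate applied to a type with no subdimensions sets `ptrEle`
  // and is only accepted when it is the sole coordinate.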
2366
2367 /// Walk the abstract memory layout and determine if the path traverses any
2368 /// array types with unknown shape. Return true iff all the array types have a
2369 /// constant shape along the path.
2370   static bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) {
2371 for (std::size_t i = 0, sz = coors.size(); i < sz; ++i) {
2372 mlir::Value nxtOpnd = coors[i];
2373 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
2374 if (fir::sequenceWithNonConstantShape(arrTy))
2375 return false;
2376 i += arrTy.getDimension() - 1;
2377 type = arrTy.getEleTy();
2378 } else if (auto strTy = type.dyn_cast<fir::RecordType>()) {
2379 type = strTy.getType(getFieldNumber(strTy, nxtOpnd));
2380 } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) {
2381 type = strTy.getType(getConstantIntValue(nxtOpnd));
2382 } else {
2383 return true;
2384 }
2385 }
2386 return true;
2387 }
2388
2389 private:
2390 mlir::LogicalResult
2391   doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands,
2392 mlir::Location loc,
2393 mlir::ConversionPatternRewriter &rewriter) const {
2394 mlir::Type boxObjTy = coor.getBaseType();
2395 assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`");
2396
2397 mlir::Value boxBaseAddr = operands[0];
2398
2399 // 1. SPECIAL CASE (uses `fir.len_param_index`):
2400 // %box = ... : !fir.box<!fir.type<derived{len1:i32}>>
2401 // %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}>
2402 // %addr = coordinate_of %box, %lenp
2403 if (coor.getNumOperands() == 2) {
2404 mlir::Operation *coordinateDef =
2405 (*coor.getCoor().begin()).getDefiningOp();
2406 if (mlir::isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef))
2407 TODO(loc,
2408 "fir.coordinate_of - fir.len_param_index is not supported yet");
2409 }
2410
2411 // 2. GENERAL CASE:
2412 // 2.1. (`fir.array`)
2413   //    %box = ... : !fir.box<!fir.array<?xU>>
2414 // %idx = ... : index
2415 // %resultAddr = coordinate_of %box, %idx : !fir.ref<U>
2416 // 2.2 (`fir.derived`)
2417   //    %box = ... : !fir.box<!fir.type<derived_type{field_1:i32}>>
2418 // %idx = ... : i32
2419 // %resultAddr = coordinate_of %box, %idx : !fir.ref<i32>
2420   //  2.3 (`fir.derived` inside `fir.array`)
2421   //    %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32, field_2:f32}>>>
2422   //    %idx1 = ... : index   %idx2 = ... : i32
2423   //    %resultAddr = coordinate_of %box, %idx1, %idx2 : !fir.ref<f32>
2424 // 2.4. TODO: Either document or disable any other case that the following
2425 // implementation might convert.
2426 mlir::LLVM::ConstantOp c0 =
2427 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
2428 mlir::Value resultAddr =
2429 loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()),
2430 boxBaseAddr, rewriter);
2431 // Component Type
2432 auto cpnTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy);
2433 mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext());
2434
2435 for (unsigned i = 1, last = operands.size(); i < last; ++i) {
2436 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) {
2437 if (i != 1)
2438 TODO(loc, "fir.array nested inside other array and/or derived type");
2439         // Apply byte strides from the box. Ignore the lower bound from the
2440         // box since fir.coordinate_of indexes are zero-based. Lowering takes
2441         // care of lower bound aspects. This accounts both for dynamically
2442         // sized types and non-contiguous arrays.
2443 auto idxTy = lowerTy().indexType();
2444 mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0);
2445 for (unsigned index = i, lastIndex = i + arrTy.getDimension();
2446 index < lastIndex; ++index) {
2447 mlir::Value stride =
2448 loadStrideFromBox(loc, operands[0], index - i, rewriter);
2449 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy,
2450 operands[index], stride);
2451 off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off);
2452 }
2453 auto voidPtrBase =
2454 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr);
2455 llvm::SmallVector<mlir::Value> args = {off};
2456 resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy,
2457 voidPtrBase, args);
2458 i += arrTy.getDimension() - 1;
2459 cpnTy = arrTy.getEleTy();
2460 } else if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) {
2461 auto recRefTy =
2462 mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy));
2463 mlir::Value nxtOpnd = operands[i];
2464 auto memObj =
2465 rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr);
2466 llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd};
2467 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd));
2468 auto llvmCurrentObjTy = lowerTy().convertType(cpnTy);
2469 auto gep = rewriter.create<mlir::LLVM::GEPOp>(
2470 loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj,
2471 args);
2472 resultAddr =
2473 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep);
2474 } else {
2475 fir::emitFatalError(loc, "unexpected type in coordinate_of");
2476 }
2477 }
2478
2479 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr);
2480 return mlir::success();
2481 }
2482
2483 mlir::LogicalResult
2484   doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty,
2485 mlir::ValueRange operands, mlir::Location loc,
2486 mlir::ConversionPatternRewriter &rewriter) const {
2487 mlir::Type baseObjectTy = coor.getBaseType();
2488
2489 // Component Type
2490 mlir::Type cpnTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy);
2491 bool hasSubdimension = hasSubDimensions(cpnTy);
2492 bool columnIsDeferred = !hasSubdimension;
2493
2494 if (!supportedCoordinate(cpnTy, operands.drop_front(1)))
2495 TODO(loc, "unsupported combination of coordinate operands");
2496
2497 const bool hasKnownShape =
2498 arraysHaveKnownShape(cpnTy, operands.drop_front(1));
2499
2500 // If only the column is `?`, then we can simply place the column value in
2501 // the 0-th GEP position.
2502 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) {
2503 if (!hasKnownShape) {
2504 const unsigned sz = arrTy.getDimension();
2505 if (arraysHaveKnownShape(arrTy.getEleTy(),
2506 operands.drop_front(1 + sz))) {
2507 fir::SequenceType::ShapeRef shape = arrTy.getShape();
2508 bool allConst = true;
2509 for (unsigned i = 0; i < sz - 1; ++i) {
2510 if (shape[i] < 0) {
2511 allConst = false;
2512 break;
2513 }
2514 }
2515 if (allConst)
2516 columnIsDeferred = true;
2517 }
2518 }
2519 }
2520
2521 if (fir::hasDynamicSize(fir::unwrapSequenceType(cpnTy)))
2522 return mlir::emitError(
2523 loc, "fir.coordinate_of with a dynamic element size is unsupported");
2524
2525 if (hasKnownShape || columnIsDeferred) {
2526 llvm::SmallVector<mlir::Value> offs;
2527 if (hasKnownShape && hasSubdimension) {
2528 mlir::LLVM::ConstantOp c0 =
2529 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
2530 offs.push_back(c0);
2531 }
2532 llvm::Optional<int> dims;
2533 llvm::SmallVector<mlir::Value> arrIdx;
2534 for (std::size_t i = 1, sz = operands.size(); i < sz; ++i) {
2535 mlir::Value nxtOpnd = operands[i];
2536
2537 if (!cpnTy)
2538 return mlir::emitError(loc, "invalid coordinate/check failed");
2539
2540 // check if the i-th coordinate relates to an array
2541 if (dims) {
2542 arrIdx.push_back(nxtOpnd);
2543 int dimsLeft = *dims;
2544 if (dimsLeft > 1) {
2545 dims = dimsLeft - 1;
2546 continue;
2547 }
2548 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy();
2549 // append array range in reverse (FIR arrays are column-major)
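          // e.g. (illustrative) !fir.array<10x20xi32> converts to
          // !llvm.array<20 x array<10 x i32>>, so FIR coordinates (i, j) are
          // emitted as GEP indexes (..., j, i).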
2550 offs.append(arrIdx.rbegin(), arrIdx.rend());
2551 arrIdx.clear();
2552 dims.reset();
2553 continue;
2554 }
2555 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) {
2556 int d = arrTy.getDimension() - 1;
2557 if (d > 0) {
2558 dims = d;
2559 arrIdx.push_back(nxtOpnd);
2560 continue;
2561 }
2562 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy();
2563 offs.push_back(nxtOpnd);
2564 continue;
2565 }
2566
2567 // check if the i-th coordinate relates to a field
2568 if (auto recTy = cpnTy.dyn_cast<fir::RecordType>())
2569 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd));
2570 else if (auto tupTy = cpnTy.dyn_cast<mlir::TupleType>())
2571 cpnTy = tupTy.getType(getConstantIntValue(nxtOpnd));
2572 else
2573 cpnTy = nullptr;
2574
2575 offs.push_back(nxtOpnd);
2576 }
2577 if (dims)
2578 offs.append(arrIdx.rbegin(), arrIdx.rend());
2579 mlir::Value base = operands[0];
2580 mlir::Value retval = genGEP(loc, ty, rewriter, base, offs);
2581 rewriter.replaceOp(coor, retval);
2582 return mlir::success();
2583 }
2584
2585 return mlir::emitError(
2586 loc, "fir.coordinate_of base operand has unsupported type");
2587 }
2588 };
2589
2590 /// Convert `fir.field_index`. The conversion depends on whether the size of
2591 /// the record is static or dynamic.
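/// When the record layout is constant at compile time, the result is simply
/// the constant index of the field. Otherwise, a call to a compiler-generated
/// `<type>P.<field>.offset` function (see getOffsetMethodName below) computes
/// the byte offset at runtime.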
2592 struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> {
2593 using FIROpConversion::FIROpConversion;
2594
2595 // NB: most field references should be resolved by this point
2596 mlir::LogicalResult
2597   matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor,
2598 mlir::ConversionPatternRewriter &rewriter) const override {
2599 auto recTy = field.getOnType().cast<fir::RecordType>();
2600 unsigned index = recTy.getFieldIndex(field.getFieldId());
2601
2602 if (!fir::hasDynamicSize(recTy)) {
2603 // Derived type has compile-time constant layout. Return index of the
2604 // component type in the parent type (to be used in GEP).
2605 rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset(
2606 field.getLoc(), rewriter, index)});
2607 return mlir::success();
2608 }
2609
2610 // Derived type does not have a compile-time constant layout. Call the
2611 // compiler-generated function to determine the byte offset of the field at
2612 // runtime. This returns a non-constant value.
2613 mlir::FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get(
2614 field.getContext(), getOffsetMethodName(recTy, field.getFieldId()));
2615 mlir::NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr);
2616 mlir::NamedAttribute fieldAttr = rewriter.getNamedAttr(
2617 "field", mlir::IntegerAttr::get(lowerTy().indexType(), index));
2618 rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
2619 field, lowerTy().offsetType(), adaptor.getOperands(),
2620 llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr});
2621 return mlir::success();
2622 }
2623
2624 // Reconstruct the name of the compiler-generated function that calculates
2625 // the field offset.
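// e.g. (illustrative) for a record type named "_QTt" and a field "x", the
// concatenation below yields "_QTtP.x.offset".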
2626   inline static std::string getOffsetMethodName(fir::RecordType recTy,
2627 llvm::StringRef field) {
2628 return recTy.getName().str() + "P." + field.str() + ".offset";
2629 }
2630 };
2631
2632 /// Convert `fir.end`
2633 struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> {
2634 using FIROpConversion::FIROpConversion;
2635
2636 mlir::LogicalResult
2637   matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor,
2638 mlir::ConversionPatternRewriter &rewriter) const override {
2639 TODO(firEnd.getLoc(), "fir.end codegen");
2640 return mlir::failure();
2641 }
2642 };
2643
2644 /// Lower `fir.gentypedesc` to a global constant.
2645 struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> {
2646 using FIROpConversion::FIROpConversion;
2647
2648 mlir::LogicalResult
2649   matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor,
2650 mlir::ConversionPatternRewriter &rewriter) const override {
2651 TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen");
2652 return mlir::failure();
2653 }
2654 };
2655
2656 /// Lower `fir.has_value` operation to `llvm.return` operation.
2657 struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> {
2658 using FIROpConversion::FIROpConversion;
2659
2660 mlir::LogicalResult
2661   matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor,
2662 mlir::ConversionPatternRewriter &rewriter) const override {
2663 rewriter.replaceOpWithNewOp<mlir::LLVM::ReturnOp>(op,
2664 adaptor.getOperands());
2665 return mlir::success();
2666 }
2667 };
2668
2669 /// Lower the `fir.global` operation to the `llvm.global` operation.
2670 /// `fir.insert_on_range` operations are replaced with a constant dense
2671 /// attribute if they are applied to the full range.
2672 struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> {
2673 using FIROpConversion::FIROpConversion;
2674
2675 mlir::LogicalResult
2676   matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor,
2677 mlir::ConversionPatternRewriter &rewriter) const override {
2678 auto tyAttr = convertType(global.getType());
2679 if (global.getType().isa<fir::BoxType>())
2680 tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType();
2681 auto loc = global.getLoc();
2682 mlir::Attribute initAttr = global.getInitVal().value_or(mlir::Attribute());
2683 auto linkage = convertLinkage(global.getLinkName());
2684 auto isConst = global.getConstant().has_value();
2685 auto g = rewriter.create<mlir::LLVM::GlobalOp>(
2686 loc, tyAttr, isConst, linkage, global.getSymName(), initAttr);
2687 auto &gr = g.getInitializerRegion();
2688 rewriter.inlineRegionBefore(global.getRegion(), gr, gr.end());
2689 if (!gr.empty()) {
2690 // Replace insert_on_range with a constant dense attribute if the
2691 // initialization is on the full range.
2692 auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>();
2693 for (auto insertOp : insertOnRangeOps) {
2694 if (isFullRange(insertOp.getCoor(), insertOp.getType())) {
2695 auto seqTyAttr = convertType(insertOp.getType());
2696 auto *op = insertOp.getVal().getDefiningOp();
2697 auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op);
2698 if (!constant) {
2699 auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op);
2700 if (!convertOp)
2701 continue;
2702 constant = mlir::cast<mlir::arith::ConstantOp>(
2703 convertOp.getValue().getDefiningOp());
2704 }
2705 mlir::Type vecType = mlir::VectorType::get(
2706 insertOp.getType().getShape(), constant.getType());
2707 auto denseAttr = mlir::DenseElementsAttr::get(
2708 vecType.cast<mlir::ShapedType>(), constant.getValue());
2709 rewriter.setInsertionPointAfter(insertOp);
2710 rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>(
2711 insertOp, seqTyAttr, denseAttr);
2712 }
2713 }
2714 }
2715 rewriter.eraseOp(global);
2716 return mlir::success();
2717 }
2718
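  /// Return true iff the insert_on_range coordinate covers the whole sequence.
  /// The coordinate attribute holds one (lower, upper) index pair per
  /// dimension, and the range is full when each pair is (0, extent - 1).
  /// For example (illustrative), for a !fir.array<4x5xi32> the full-range
  /// coordinate would be dense<[0, 3, 0, 4]>.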
2719   bool isFullRange(mlir::DenseIntElementsAttr indexes,
2720 fir::SequenceType seqTy) const {
2721 auto extents = seqTy.getShape();
2722 if (indexes.size() / 2 != static_cast<int64_t>(extents.size()))
2723 return false;
2724 auto cur_index = indexes.value_begin<int64_t>();
2725 for (unsigned i = 0; i < indexes.size(); i += 2) {
2726 if (*(cur_index++) != 0)
2727 return false;
2728 if (*(cur_index++) != extents[i / 2] - 1)
2729 return false;
2730 }
2731 return true;
2732 }
2733
2734 // TODO: String comparison should be avoided. Replace linkName with an
2735 // enumeration.
2736 mlir::LLVM::Linkage
2737   convertLinkage(llvm::Optional<llvm::StringRef> optLinkage) const {
2738 if (optLinkage) {
2739 auto name = *optLinkage;
2740 if (name == "internal")
2741 return mlir::LLVM::Linkage::Internal;
2742 if (name == "linkonce")
2743 return mlir::LLVM::Linkage::Linkonce;
2744 if (name == "linkonce_odr")
2745 return mlir::LLVM::Linkage::LinkonceODR;
2746 if (name == "common")
2747 return mlir::LLVM::Linkage::Common;
2748 if (name == "weak")
2749 return mlir::LLVM::Linkage::Weak;
2750 }
2751 return mlir::LLVM::Linkage::External;
2752 }
2753 };
2754
2755 /// `fir.load` --> `llvm.load`
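/// For example (illustrative, with typed LLVM pointers):
///   %1 = fir.load %0 : !fir.ref<i32>
/// becomes
///   %1 = llvm.load %0 : !llvm.ptr<i32>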
2756 struct LoadOpConversion : public FIROpConversion<fir::LoadOp> {
2757 using FIROpConversion::FIROpConversion;
2758
2759 mlir::LogicalResult
2760   matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor,
2761 mlir::ConversionPatternRewriter &rewriter) const override {
2762 // fir.box is a special case because it is considered an SSA value in
2763 // FIR, but it is lowered as a pointer to a descriptor. So fir.ref<fir.box>
2764 // and fir.box end up being the same LLVM type, and loading a
2765 // fir.ref<fir.box> is actually a no-op in LLVM.
2766 if (load.getType().isa<fir::BoxType>()) {
2767 rewriter.replaceOp(load, adaptor.getOperands()[0]);
2768 } else {
2769 rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>(
2770 load, convertType(load.getType()), adaptor.getOperands(),
2771 load->getAttrs());
2772 }
2773 return mlir::success();
2774 }
2775 };
2776
2777 /// Lower `fir.no_reassoc` to LLVM IR dialect.
2778 /// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast
2779 /// math flags?
2780 struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> {
2781 using FIROpConversion::FIROpConversion;
2782
2783 mlir::LogicalResult
2784   matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor,
2785 mlir::ConversionPatternRewriter &rewriter) const override {
2786 rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]);
2787 return mlir::success();
2788 }
2789 };
2790
2791 static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest,
2792 llvm::Optional<mlir::ValueRange> destOps,
2793 mlir::ConversionPatternRewriter &rewriter,
2794 mlir::Block *newBlock) {
2795 if (destOps)
2796 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, *destOps, newBlock,
2797 mlir::ValueRange());
2798 else
2799 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock);
2800 }
2801
2802 template <typename A, typename B>
2803 static void genBrOp(A caseOp, mlir::Block *dest, llvm::Optional<B> destOps,
2804 mlir::ConversionPatternRewriter &rewriter) {
2805 if (destOps)
2806 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, *destOps, dest);
2807 else
2808 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest);
2809 }
2810
2811 static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp,
2812 mlir::Block *dest,
2813 llvm::Optional<mlir::ValueRange> destOps,
2814 mlir::ConversionPatternRewriter &rewriter) {
2815 auto *thisBlock = rewriter.getInsertionBlock();
2816 auto *newBlock = createBlock(rewriter, dest);
2817 rewriter.setInsertionPointToEnd(thisBlock);
2818 genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock);
2819 rewriter.setInsertionPointToEnd(newBlock);
2820 }
2821
2822 /// Conversion of `fir.select_case`
2823 ///
2824 /// The `fir.select_case` operation is converted to an if-then-else ladder.
2825 /// Depending on the case condition type, one or several comparisons and
2826 /// conditional branches may be generated.
2827 ///
2828 /// A point value case such as `case(4)`, a lower bound case such as
2829 /// `case(5:)`, or an upper bound case such as `case(:3)` is converted to a
2830 /// simple comparison between the selector value and the constant value in the
2831 /// case. The block associated with the case condition is executed if the
2832 /// comparison succeeds; otherwise, control branches to the next block holding
2833 /// the comparison for the next case condition.
2834 ///
2835 /// A closed interval case condition such as `case(7:10)` is converted with a
2836 /// first comparison and conditional branch for the lower bound. If it
2837 /// succeeds, control branches to a second block holding the comparison for
2838 /// the upper bound of the same case condition.
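///
/// For example (illustrative), `case (7:10)` lowers to roughly:
///   %0 = llvm.icmp "sle" %c7, %selector : i32
///   llvm.cond_br %0, ^checkUpper, ^nextCase
/// ^checkUpper:
///   %1 = llvm.icmp "sle" %selector, %c10 : i32
///   llvm.cond_br %1, ^caseBlock, ^nextCase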
2839 ///
2840 /// TODO: lowering of CHARACTER type cases is not handled yet.
2841 struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> {
2842 using FIROpConversion::FIROpConversion;
2843
2844 mlir::LogicalResult
2845   matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor,
2846 mlir::ConversionPatternRewriter &rewriter) const override {
2847 unsigned conds = caseOp.getNumConditions();
2848 llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue();
2849 // Type can be CHARACTER, INTEGER, or LOGICAL (C1145)
2850 auto ty = caseOp.getSelector().getType();
2851 if (ty.isa<fir::CharacterType>()) {
2852 TODO(caseOp.getLoc(), "fir.select_case codegen with character type");
2853 return mlir::failure();
2854 }
2855 mlir::Value selector = caseOp.getSelector(adaptor.getOperands());
2856 auto loc = caseOp.getLoc();
2857 for (unsigned t = 0; t != conds; ++t) {
2858 mlir::Block *dest = caseOp.getSuccessor(t);
2859 llvm::Optional<mlir::ValueRange> destOps =
2860 caseOp.getSuccessorOperands(adaptor.getOperands(), t);
2861 llvm::Optional<mlir::ValueRange> cmpOps =
2862 *caseOp.getCompareOperands(adaptor.getOperands(), t);
2863 mlir::Value caseArg = *(cmpOps.value().begin());
2864 mlir::Attribute attr = cases[t];
2865 if (attr.isa<fir::PointIntervalAttr>()) {
2866 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
2867 loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg);
2868 genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
2869 continue;
2870 }
2871 if (attr.isa<fir::LowerBoundAttr>()) {
2872 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
2873 loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
2874 genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
2875 continue;
2876 }
2877 if (attr.isa<fir::UpperBoundAttr>()) {
2878 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
2879 loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg);
2880 genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
2881 continue;
2882 }
2883 if (attr.isa<fir::ClosedIntervalAttr>()) {
2884 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
2885 loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
2886 auto *thisBlock = rewriter.getInsertionBlock();
2887 auto *newBlock1 = createBlock(rewriter, dest);
2888 auto *newBlock2 = createBlock(rewriter, dest);
2889 rewriter.setInsertionPointToEnd(thisBlock);
2890 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2);
2891 rewriter.setInsertionPointToEnd(newBlock1);
2892 mlir::Value caseArg0 = *(cmpOps.value().begin() + 1);
2893 auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>(
2894 loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0);
2895 genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2);
2896 rewriter.setInsertionPointToEnd(newBlock2);
2897 continue;
2898 }
2899 assert(attr.isa<mlir::UnitAttr>());
2900 assert((t + 1 == conds) && "unit must be last");
2901 genBrOp(caseOp, dest, destOps, rewriter);
2902 }
2903 return mlir::success();
2904 }
2905 };
2906
2907 template <typename OP>
2908 static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select,
2909 typename OP::Adaptor adaptor,
2910 mlir::ConversionPatternRewriter &rewriter) {
2911 unsigned conds = select.getNumConditions();
2912 auto cases = select.getCases().getValue();
2913 mlir::Value selector = adaptor.getSelector();
2914 auto loc = select.getLoc();
2915 assert(conds > 0 && "select must have cases");
2916
2917 llvm::SmallVector<mlir::Block *> destinations;
2918 llvm::SmallVector<mlir::ValueRange> destinationsOperands;
2919 mlir::Block *defaultDestination;
2920 mlir::ValueRange defaultOperands;
2921 llvm::SmallVector<int32_t> caseValues;
2922
2923 for (unsigned t = 0; t != conds; ++t) {
2924 mlir::Block *dest = select.getSuccessor(t);
2925 auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t);
2926 const mlir::Attribute &attr = cases[t];
2927 if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) {
2928 destinations.push_back(dest);
2929 destinationsOperands.push_back(destOps ? *destOps : mlir::ValueRange{});
2930 caseValues.push_back(intAttr.getInt());
2931 continue;
2932 }
2933 assert(attr.template dyn_cast_or_null<mlir::UnitAttr>());
2934 assert((t + 1 == conds) && "unit must be last");
2935 defaultDestination = dest;
2936 defaultOperands = destOps ? *destOps : mlir::ValueRange{};
2937 }
2938
2939 // LLVM::SwitchOp takes an i32 type for the selector.
2940 if (select.getSelector().getType() != rewriter.getI32Type())
2941 selector = rewriter.create<mlir::LLVM::TruncOp>(loc, rewriter.getI32Type(),
2942 selector);
2943
2944 rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>(
2945 select, selector,
2946 /*defaultDestination=*/defaultDestination,
2947 /*defaultOperands=*/defaultOperands,
2948 /*caseValues=*/caseValues,
2949 /*caseDestinations=*/destinations,
2950 /*caseOperands=*/destinationsOperands,
2951 /*branchWeights=*/llvm::ArrayRef<std::int32_t>());
2952 }
2953
2954 /// Conversion of fir::SelectOp to an `llvm.switch` on the selector value.
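/// For example (illustrative):
///   fir.select %sel : i32 [1, ^bb1, 2, ^bb2, unit, ^bb3]
/// becomes an `llvm.switch` with case values {1, 2}, case destinations
/// {^bb1, ^bb2}, and ^bb3 as the default destination. The selector is
/// truncated to i32 when needed.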
2955 struct SelectOpConversion : public FIROpConversion<fir::SelectOp> {
2956 using FIROpConversion::FIROpConversion;
2957
2958 mlir::LogicalResult
2959   matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor,
2960 mlir::ConversionPatternRewriter &rewriter) const override {
2961 selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter);
2962 return mlir::success();
2963 }
2964 };
2965
2966 /// Conversion of fir::SelectRankOp; like fir::SelectOp, it is lowered to an
2966 /// `llvm.switch` on the selector (rank) value.
2967 struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> {
2968 using FIROpConversion::FIROpConversion;
2969
2970 mlir::LogicalResult
2971   matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor,
2972 mlir::ConversionPatternRewriter &rewriter) const override {
2973 selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter);
2974 return mlir::success();
2975 }
2976 };
2977
2978 /// Lower `fir.select_type` to LLVM IR dialect.
2979 struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> {
2980 using FIROpConversion::FIROpConversion;
2981
2982 mlir::LogicalResult
2983   matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor,
2984 mlir::ConversionPatternRewriter &rewriter) const override {
2985 mlir::emitError(select.getLoc(),
2986 "fir.select_type should have already been converted");
2987 return mlir::failure();
2988 }
2989 };
2990
2991 /// `fir.store` --> `llvm.store`
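/// For example (illustrative, with typed LLVM pointers):
///   fir.store %val to %addr : !fir.ref<i32>
/// becomes
///   llvm.store %val, %addr : !llvm.ptr<i32>
/// Storing a fir.box value is special-cased below: the descriptor is loaded
/// first and then stored by value.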
2992 struct StoreOpConversion : public FIROpConversion<fir::StoreOp> {
2993 using FIROpConversion::FIROpConversion;
2994
2995 mlir::LogicalResult
2996   matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor,
2997 mlir::ConversionPatternRewriter &rewriter) const override {
2998 if (store.getValue().getType().isa<fir::BoxType>()) {
2999 // fir.box value is actually in memory, load it first before storing it.
3000 mlir::Location loc = store.getLoc();
3001 mlir::Type boxPtrTy = adaptor.getOperands()[0].getType();
3002 auto val = rewriter.create<mlir::LLVM::LoadOp>(
3003 loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(),
3004 adaptor.getOperands()[0]);
3005 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>(
3006 store, val, adaptor.getOperands()[1]);
3007 } else {
3008 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>(
3009 store, adaptor.getOperands()[0], adaptor.getOperands()[1]);
3010 }
3011 return mlir::success();
3012 }
3013 };
3014
3015 namespace {
3016
3017 /// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for
3018 /// the character buffer and one for the buffer length.
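/// A boxchar lowers to a two-field struct, e.g. (illustrative)
/// !llvm.struct<(ptr<i8>, i64)>: field 0 is the buffer pointer and field 1 is
/// the length, which is then cast to the requested length type.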
3019 struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> {
3020 using FIROpConversion::FIROpConversion;
3021
3022 mlir::LogicalResult
3023   matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor,
3024 mlir::ConversionPatternRewriter &rewriter) const override {
3025 auto *ctx = unboxchar.getContext();
3026
3027 mlir::Type lenTy = convertType(unboxchar.getType(1));
3028 mlir::Value tuple = adaptor.getOperands()[0];
3029 mlir::Type tupleTy = tuple.getType();
3030
3031 mlir::Location loc = unboxchar.getLoc();
3032 mlir::Value ptrToBuffer =
3033 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 0);
3034
3035 mlir::LLVM::ExtractValueOp len =
3036 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 1);
3037 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len);
3038
3039 rewriter.replaceOp(unboxchar,
3040 llvm::ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast});
3041 return mlir::success();
3042 }
3043 };
3044
3045 /// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its
3046 /// components.
3047 /// TODO: Part of supporting Fortran 2003 procedure pointers.
3048 struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> {
3049 using FIROpConversion::FIROpConversion;
3050
3051 mlir::LogicalResult
3052   matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor,
3053 mlir::ConversionPatternRewriter &rewriter) const override {
3054 TODO(unboxproc.getLoc(), "fir.unboxproc codegen");
3055 return mlir::failure();
3056 }
3057 };
3058
3059 /// Convert to the LLVM IR dialect `undef` operation.
3060 struct UndefOpConversion : public FIROpConversion<fir::UndefOp> {
3061 using FIROpConversion::FIROpConversion;
3062
3063 mlir::LogicalResult
3064   matchAndRewrite(fir::UndefOp undef, OpAdaptor,
3065 mlir::ConversionPatternRewriter &rewriter) const override {
3066 rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>(
3067 undef, convertType(undef.getType()));
3068 return mlir::success();
3069 }
3070 };
3071
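/// Convert `fir.zero_bits` to a zero value of the corresponding LLVM type:
/// `llvm.mlir.null` for pointer types and a zero `llvm.mlir.constant` for
/// integer and floating point types. Aggregate types are not handled yet.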
3072 struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> {
3073 using FIROpConversion::FIROpConversion;
3074
3075 mlir::LogicalResult
3076   matchAndRewrite(fir::ZeroOp zero, OpAdaptor,
3077 mlir::ConversionPatternRewriter &rewriter) const override {
3078 mlir::Type ty = convertType(zero.getType());
3079 if (ty.isa<mlir::LLVM::LLVMPointerType>()) {
3080 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty);
3081 } else if (ty.isa<mlir::IntegerType>()) {
3082 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
3083 zero, ty, mlir::IntegerAttr::get(zero.getType(), 0));
3084 } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) {
3085 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
3086 zero, ty, mlir::FloatAttr::get(zero.getType(), 0.0));
3087 } else {
3088 // TODO: create ConstantAggregateZero for FIR aggregate/array types.
3089 return rewriter.notifyMatchFailure(
3090 zero,
3091 "conversion of fir.zero with aggregate type not implemented yet");
3092 }
3093 return mlir::success();
3094 }
3095 };
3096
3097 /// `fir.unreachable` --> `llvm.unreachable`
3098 struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> {
3099 using FIROpConversion::FIROpConversion;
3100
3101 mlir::LogicalResult
3102   matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor,
3103 mlir::ConversionPatternRewriter &rewriter) const override {
3104 rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach);
3105 return mlir::success();
3106 }
3107 };
3108
3109 /// `fir.is_present` -->
3110 /// ```
3111 /// %0 = llvm.mlir.constant(0 : i64)
3112 /// %1 = llvm.ptrtoint %arg0 : !llvm.ptr<i64> to i64
3113 /// %2 = llvm.icmp "ne" %1, %0 : i64
3114 /// ```
3115 struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> {
3116 using FIROpConversion::FIROpConversion;
3117
3118 mlir::LogicalResult
3119   matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor,
3120 mlir::ConversionPatternRewriter &rewriter) const override {
3121 mlir::Type idxTy = lowerTy().indexType();
3122 mlir::Location loc = isPresent.getLoc();
3123 auto ptr = adaptor.getOperands()[0];
3124
3125 if (isPresent.getVal().getType().isa<fir::BoxCharType>()) {
3126 auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>();
3127 assert(!structTy.isOpaque() && !structTy.getBody().empty());
3128
3129 mlir::Type ty = structTy.getBody()[0];
3130 mlir::MLIRContext *ctx = isPresent.getContext();
3131 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
3132 ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0);
3133 }
3134 mlir::LLVM::ConstantOp c0 =
3135 genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0);
3136 auto addr = rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr);
3137 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
3138 isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0);
3139
3140 return mlir::success();
3141 }
3142 };
3143
3144 /// Create a value signaling an absent optional argument in a call, e.g.
3145 /// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>`
3146 struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> {
3147 using FIROpConversion::FIROpConversion;
3148
3149 mlir::LogicalResult
3150   matchAndRewrite(fir::AbsentOp absent, OpAdaptor,
3151 mlir::ConversionPatternRewriter &rewriter) const override {
3152 mlir::Type ty = convertType(absent.getType());
3153 mlir::Location loc = absent.getLoc();
3154
3155 if (absent.getType().isa<fir::BoxCharType>()) {
3156 auto structTy = ty.cast<mlir::LLVM::LLVMStructType>();
3157 assert(!structTy.isOpaque() && !structTy.getBody().empty());
3158 auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
3159 auto nullField =
3160 rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]);
3161 mlir::MLIRContext *ctx = absent.getContext();
3162 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
3163 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
3164 absent, ty, undefStruct, nullField, c0);
3165 } else {
3166 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty);
3167 }
3168 return mlir::success();
3169 }
3170 };
3171
3172 //
3173 // Primitive operations on Complex types
3174 //
3175
3176 /// Generate inline code for complex addition/subtraction
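/// A complex value is represented as an LLVM struct whose field 0 holds the
/// real part and field 1 the imaginary part. Both parts are combined
/// element-wise with LLVMOP, i.e. (x + iy) op (x' + iy') = (x op x') +
/// i(y op y').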
3177 template <typename LLVMOP, typename OPTY>
3178 static mlir::LLVM::InsertValueOp
3179 complexSum(OPTY sumop, mlir::ValueRange opnds,
3180 mlir::ConversionPatternRewriter &rewriter,
3181 fir::LLVMTypeConverter &lowering) {
3182 mlir::Value a = opnds[0];
3183 mlir::Value b = opnds[1];
3184 auto loc = sumop.getLoc();
3185 auto ctx = sumop.getContext();
3186 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
3187 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
3188 mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType()));
3189 mlir::Type ty = lowering.convertType(sumop.getType());
3190 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
3191 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
3192 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
3193 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
3194 auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1);
3195 auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1);
3196 auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
3197 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0);
3198 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1);
3199 }
3200 } // namespace
3201
3202 namespace {
3203 struct AddcOpConversion : public FIROpConversion<fir::AddcOp> {
3204 using FIROpConversion::FIROpConversion;
3205
3206 mlir::LogicalResult
3207   matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor,
3208 mlir::ConversionPatternRewriter &rewriter) const override {
3209 // given: (x + iy) + (x' + iy')
3210 // result: (x + x') + i(y + y')
3211 auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(),
3212 rewriter, lowerTy());
3213 rewriter.replaceOp(addc, r.getResult());
3214 return mlir::success();
3215 }
3216 };
3217
3218 struct SubcOpConversion : public FIROpConversion<fir::SubcOp> {
3219 using FIROpConversion::FIROpConversion;
3220
3221 mlir::LogicalResult
3222   matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor,
3223 mlir::ConversionPatternRewriter &rewriter) const override {
3224 // given: (x + iy) - (x' + iy')
3225 // result: (x - x') + i(y - y')
3226 auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(),
3227 rewriter, lowerTy());
3228 rewriter.replaceOp(subc, r.getResult());
3229 return mlir::success();
3230 }
3231 };
3232
3233 /// Inlined complex multiply
3234 struct MulcOpConversion : public FIROpConversion<fir::MulcOp> {
3235 using FIROpConversion::FIROpConversion;
3236
3237 mlir::LogicalResult
3238   matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor,
3239 mlir::ConversionPatternRewriter &rewriter) const override {
3240 // TODO: Can we use a call to __muldc3 ?
3241 // given: (x + iy) * (x' + iy')
3242 // result: (xx'-yy')+i(xy'+yx')
3243 mlir::Value a = adaptor.getOperands()[0];
3244 mlir::Value b = adaptor.getOperands()[1];
3245 auto loc = mulc.getLoc();
3246 auto *ctx = mulc.getContext();
3247 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
3248 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
3249 mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType()));
3250 mlir::Type ty = convertType(mulc.getType());
3251 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
3252 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
3253 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
3254 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
3255 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
3256 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
3257 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
3258 auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx);
3259 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
3260 auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy);
3261 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
3262 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
3263 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
3264 rewriter.replaceOp(mulc, r0.getResult());
3265 return mlir::success();
3266 }
3267 };
3268
3269 /// Inlined complex division
3270 struct DivcOpConversion : public FIROpConversion<fir::DivcOp> {
3271 using FIROpConversion::FIROpConversion;
3272
3273 mlir::LogicalResult
3274   matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor,
3275 mlir::ConversionPatternRewriter &rewriter) const override {
3276 // TODO: Can we use a call to __divdc3 instead?
3277 // Just generate inline code for now.
3278 // given: (x + iy) / (x' + iy')
3279 // result: ((xx'+yy')/d) + i((yx'-xy')/d) where d = x'x' + y'y'
3280 mlir::Value a = adaptor.getOperands()[0];
3281 mlir::Value b = adaptor.getOperands()[1];
3282 auto loc = divc.getLoc();
3283 auto *ctx = divc.getContext();
3284 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
3285 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
3286 mlir::Type eleTy = convertType(getComplexEleTy(divc.getType()));
3287 mlir::Type ty = convertType(divc.getType());
3288 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
3289 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
3290 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
3291 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
3292 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
3293 auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1);
3294 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
3295 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
3296 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
3297 auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1);
3298 auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1);
3299 auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy);
3300 auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy);
3301 auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d);
3302 auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d);
3303 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
3304 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
3305 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
3306 rewriter.replaceOp(divc, r0.getResult());
3307 return mlir::success();
3308 }
3309 };
3310
3311 /// Inlined complex negation
3312 struct NegcOpConversion : public FIROpConversion<fir::NegcOp> {
3313 using FIROpConversion::FIROpConversion;
3314
3315 mlir::LogicalResult
3316   matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor,
3317 mlir::ConversionPatternRewriter &rewriter) const override {
3318 // given: -(x + iy)
3319 // result: -x - iy
3320 auto *ctxt = neg.getContext();
3321 auto eleTy = convertType(getComplexEleTy(neg.getType()));
3322 auto ty = convertType(neg.getType());
3323 auto loc = neg.getLoc();
3324 mlir::Value o0 = adaptor.getOperands()[0];
3325 auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
3326 auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
3327 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0);
3328 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1);
3329 auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp);
3330 auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip);
3331 auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0);
3332 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1);
3333 return mlir::success();
3334 }
3335 };
3336
3337 /// Conversion pattern for operations that must be dead by now. The information
3338 /// these operations carry has already been consumed by other operations, so at
3339 /// this point they should not have any remaining uses.
3340 /// These operations are normally dead after the pre-codegen pass.
3341 template <typename FromOp>
3342 struct MustBeDeadConversion : public FIROpConversion<FromOp> {
3343   explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering,
3344 const fir::FIRToLLVMPassOptions &options)
3345 : FIROpConversion<FromOp>(lowering, options) {}
3346 using OpAdaptor = typename FromOp::Adaptor;
3347
3348 mlir::LogicalResult
3349   matchAndRewrite(FromOp op, OpAdaptor adaptor,
3350 mlir::ConversionPatternRewriter &rewriter) const final {
3351 if (!op->getUses().empty())
3352 return rewriter.notifyMatchFailure(op, "op must be dead");
3353 rewriter.eraseOp(op);
3354 return mlir::success();
3355 }
3356 };
3357
3358 struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> {
3359 using MustBeDeadConversion::MustBeDeadConversion;
3360 };
3361
3362 struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> {
3363 using MustBeDeadConversion::MustBeDeadConversion;
3364 };
3365
3366 struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> {
3367 using MustBeDeadConversion::MustBeDeadConversion;
3368 };
3369
3370 struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> {
3371 using MustBeDeadConversion::MustBeDeadConversion;
3372 };
3373
3374 } // namespace
3375
3376 namespace {
3377 /// Convert FIR dialect to LLVM dialect
3378 ///
3379 /// This pass lowers all FIR dialect operations to LLVM IR dialect. An
3380 /// MLIR pass is used to lower residual Std dialect to LLVM IR dialect.
3381 class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> {
3382 public:
3383 FIRToLLVMLowering() = default;
3384   FIRToLLVMLowering(fir::FIRToLLVMPassOptions options) : options{options} {}
3385   mlir::ModuleOp getModule() { return getOperation(); }
3386
3387   void runOnOperation() override final {
3388 auto mod = getModule();
3389 if (!forcedTargetTriple.empty())
3390 fir::setTargetTriple(mod, forcedTargetTriple);
3391
3392 auto *context = getModule().getContext();
3393 fir::LLVMTypeConverter typeConverter{getModule()};
3394 mlir::RewritePatternSet pattern(context);
3395 pattern.insert<
3396 AbsentOpConversion, AddcOpConversion, AddrOfOpConversion,
3397 AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion,
3398 BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion,
3399 BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion,
3400 BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeDescOpConversion,
3401 CallOpConversion, CmpcOpConversion, ConstcOpConversion,
3402 ConvertOpConversion, CoordinateOpConversion, DispatchOpConversion,
3403 DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion,
3404 EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion,
3405 ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion,
3406 FreeMemOpConversion, GenTypeDescOpConversion, GlobalLenOpConversion,
3407 GlobalOpConversion, HasValueOpConversion, InsertOnRangeOpConversion,
3408 InsertValueOpConversion, IsPresentOpConversion,
3409 LenParamIndexOpConversion, LoadOpConversion, MulcOpConversion,
3410 NegcOpConversion, NoReassocOpConversion, SelectCaseOpConversion,
3411 SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion,
3412 ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion,
3413 SliceOpConversion, StoreOpConversion, StringLitOpConversion,
3414 SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion,
3415 UndefOpConversion, UnreachableOpConversion, XArrayCoorOpConversion,
3416 XEmboxOpConversion, XReboxOpConversion, ZeroOpConversion>(typeConverter,
3417 options);
3418 mlir::populateFuncToLLVMConversionPatterns(typeConverter, pattern);
3419 mlir::populateOpenMPToLLVMConversionPatterns(typeConverter, pattern);
3420 mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter,
3421 pattern);
3422 mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter,
3423 pattern);
3424 // Convert math-like dialect operations, which can be produced
3425 // when late math lowering mode is used, into llvm dialect.
3426 mlir::populateMathToLLVMConversionPatterns(typeConverter, pattern);
3427 mlir::populateMathToLibmConversionPatterns(pattern, /*benefit=*/0);
3428 mlir::ConversionTarget target{*context};
3429 target.addLegalDialect<mlir::LLVM::LLVMDialect>();
3430     // The OpenMP dialect is legal for operations without regions; for those
3431     // that contain regions, it is legal only if the regions contain nothing
3432     // but the LLVM dialect. Add the OpenMP dialect as a legal dialect for
3433     // conversion and legalize conversion of OpenMP operations without regions.
3434 mlir::configureOpenMPToLLVMConversionLegality(target, typeConverter);
3435 target.addLegalDialect<mlir::omp::OpenMPDialect>();
3436
3437 // required NOPs for applying a full conversion
3438 target.addLegalOp<mlir::ModuleOp>();
3439
3440 // apply the patterns
3441 if (mlir::failed(mlir::applyFullConversion(getModule(), target,
3442 std::move(pattern)))) {
3443 signalPassFailure();
3444 }
3445 }
3446
3447 private:
3448 fir::FIRToLLVMPassOptions options;
3449 };
3450
3451 /// Lower from LLVM IR dialect to proper LLVM-IR and dump the module
3452 struct LLVMIRLoweringPass
3453 : public mlir::PassWrapper<LLVMIRLoweringPass,
3454 mlir::OperationPass<mlir::ModuleOp>> {
3455 MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(LLVMIRLoweringPass)
3456
3457   LLVMIRLoweringPass(llvm::raw_ostream &output, fir::LLVMIRLoweringPrinter p)
3458 : output{output}, printer{p} {}
3459
3460   mlir::ModuleOp getModule() { return getOperation(); }
3461
3462   void runOnOperation() override final {
3463 auto *ctx = getModule().getContext();
3464 auto optName = getModule().getName();
3465 llvm::LLVMContext llvmCtx;
3466 if (auto llvmModule = mlir::translateModuleToLLVMIR(
3467 getModule(), llvmCtx, optName ? *optName : "FIRModule")) {
3468 printer(*llvmModule, output);
3469 return;
3470 }
3471
3472 mlir::emitError(mlir::UnknownLoc::get(ctx), "could not emit LLVM-IR\n");
3473 signalPassFailure();
3474 }
3475
3476 private:
3477 llvm::raw_ostream &output;
3478 fir::LLVMIRLoweringPrinter printer;
3479 };
3480
3481 } // namespace
3482
3483 std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() {
3484 return std::make_unique<FIRToLLVMLowering>();
3485 }
3486
3487 std::unique_ptr<mlir::Pass>
3488 fir::createFIRToLLVMPass(fir::FIRToLLVMPassOptions options) {
3489 return std::make_unique<FIRToLLVMLowering>(options);
3490 }
3491
3492 std::unique_ptr<mlir::Pass>
3493 fir::createLLVMDialectToLLVMPass(llvm::raw_ostream &output,
3494 fir::LLVMIRLoweringPrinter printer) {
3495 return std::make_unique<LLVMIRLoweringPass>(output, printer);
3496 }
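
// Typical usage (illustrative): these factory functions are intended to be
// scheduled on an mlir::PassManager, e.g.
//   mlir::PassManager pm(&context);
//   pm.addPass(fir::createFIRToLLVMPass());
//   pm.addPass(fir::createLLVMDialectToLLVMPass(llvm::outs(), printer));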
3497