//===- ModuleTranslation.cpp - MLIR to LLVM conversion --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the translation between an MLIR LLVM dialect module and
// the corresponding LLVMIR module. It only handles core LLVM IR operations.
//
//===----------------------------------------------------------------------===//

#include "mlir/Target/LLVMIR/ModuleTranslation.h"

#include "DebugTranslation.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/RegionGraphTraits.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Target/LLVMIR/TypeTranslation.h"
#include "llvm/ADT/TypeSwitch.h"

#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"

using namespace mlir;
using namespace mlir::LLVM;
using namespace mlir::LLVM::detail;

#include "mlir/Dialect/LLVMIR/LLVMConversionEnumsToLLVM.inc"

/// Builds a constant of a sequential LLVM type `type`, potentially containing
/// other sequential types recursively, from the individual constant values
/// provided in `constants`. `shape` contains the number of elements in nested
/// sequential types. Reports errors at `loc` and returns nullptr on error.
static llvm::Constant *
buildSequentialConstant(ArrayRef<llvm::Constant *> &constants,
                        ArrayRef<int64_t> shape, llvm::Type *type,
                        Location loc) {
  if (shape.empty()) {
    llvm::Constant *result = constants.front();
    constants = constants.drop_front();
    return result;
  }

  llvm::Type *elementType;
  if (auto *arrayTy = dyn_cast<llvm::ArrayType>(type)) {
    elementType = arrayTy->getElementType();
  } else if (auto *vectorTy = dyn_cast<llvm::VectorType>(type)) {
    elementType = vectorTy->getElementType();
  } else {
    emitError(loc) << "expected sequential LLVM types wrapping a scalar";
    return nullptr;
  }

  SmallVector<llvm::Constant *, 8> nested;
  nested.reserve(shape.front());
  for (int64_t i = 0; i < shape.front(); ++i) {
    nested.push_back(buildSequentialConstant(constants, shape.drop_front(),
                                             elementType, loc));
    if (!nested.back())
      return nullptr;
  }

  if (shape.size() == 1 && type->isVectorTy())
    return llvm::ConstantVector::get(nested);
  return llvm::ConstantArray::get(
      llvm::ArrayType::get(elementType, shape.front()), nested);
}

/// Returns the first non-sequential type nested in sequential types.
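/// For example, for `[4 x <8 x float>]` this returns the scalar type `float`.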
static llvm::Type *getInnermostElementType(llvm::Type *type) {
  do {
    if (auto *arrayTy = dyn_cast<llvm::ArrayType>(type)) {
      type = arrayTy->getElementType();
    } else if (auto *vectorTy = dyn_cast<llvm::VectorType>(type)) {
      type = vectorTy->getElementType();
    } else {
      return type;
    }
  } while (1);
}

/// Create an LLVM IR constant of `llvmType` from the MLIR attribute `attr`.
/// This currently supports integer, floating point, splat and dense element
/// attributes and combinations thereof. In case of error, report it to `loc`
/// and return nullptr.
llvm::Constant *ModuleTranslation::getLLVMConstant(llvm::Type *llvmType,
                                                   Attribute attr,
                                                   Location loc) {
  if (!attr)
    return llvm::UndefValue::get(llvmType);
  if (llvmType->isStructTy()) {
    emitError(loc, "struct types are not supported in constants");
    return nullptr;
  }
  // For integer types, we allow a mismatch in sizes as the index type in
  // MLIR might have a different size than the index type in the LLVM module.
  if (auto intAttr = attr.dyn_cast<IntegerAttr>())
    return llvm::ConstantInt::get(
        llvmType,
        intAttr.getValue().sextOrTrunc(llvmType->getIntegerBitWidth()));
  if (auto floatAttr = attr.dyn_cast<FloatAttr>())
    return llvm::ConstantFP::get(llvmType, floatAttr.getValue());
  if (auto funcAttr = attr.dyn_cast<FlatSymbolRefAttr>())
    return llvm::ConstantExpr::getBitCast(
        functionMapping.lookup(funcAttr.getValue()), llvmType);
  if (auto splatAttr = attr.dyn_cast<SplatElementsAttr>()) {
    llvm::Type *elementType;
    uint64_t numElements;
    if (auto *arrayTy = dyn_cast<llvm::ArrayType>(llvmType)) {
      elementType = arrayTy->getElementType();
      numElements = arrayTy->getNumElements();
    } else {
      auto *vectorTy = cast<llvm::FixedVectorType>(llvmType);
      elementType = vectorTy->getElementType();
      numElements = vectorTy->getNumElements();
    }
    // Splat value is a scalar. Extract it only if the element type is not
    // another sequence type. The recursion terminates because each step
    // removes one outer sequential type.
    bool elementTypeSequential =
        isa<llvm::ArrayType, llvm::VectorType>(elementType);
    llvm::Constant *child = getLLVMConstant(
        elementType,
        elementTypeSequential ?
            splatAttr : splatAttr.getSplatValue(),
        loc);
    if (!child)
      return nullptr;
    if (llvmType->isVectorTy())
      return llvm::ConstantVector::getSplat(
          llvm::ElementCount::get(numElements, /*Scalable=*/false), child);
    if (llvmType->isArrayTy()) {
      auto *arrayType = llvm::ArrayType::get(elementType, numElements);
      SmallVector<llvm::Constant *, 8> constants(numElements, child);
      return llvm::ConstantArray::get(arrayType, constants);
    }
  }

  if (auto elementsAttr = attr.dyn_cast<ElementsAttr>()) {
    assert(elementsAttr.getType().hasStaticShape());
    assert(elementsAttr.getNumElements() != 0 &&
           "unexpected empty elements attribute");
    assert(!elementsAttr.getType().getShape().empty() &&
           "unexpected empty elements attribute shape");

    SmallVector<llvm::Constant *, 8> constants;
    constants.reserve(elementsAttr.getNumElements());
    llvm::Type *innermostType = getInnermostElementType(llvmType);
    for (auto n : elementsAttr.getValues<Attribute>()) {
      constants.push_back(getLLVMConstant(innermostType, n, loc));
      if (!constants.back())
        return nullptr;
    }
    ArrayRef<llvm::Constant *> constantsRef = constants;
    llvm::Constant *result = buildSequentialConstant(
        constantsRef, elementsAttr.getType().getShape(), llvmType, loc);
    assert(constantsRef.empty() && "did not consume all elemental constants");
    return result;
  }

  if (auto stringAttr = attr.dyn_cast<StringAttr>()) {
    return llvm::ConstantDataArray::get(
        llvmModule->getContext(),
        ArrayRef<char>{stringAttr.getValue().data(),
                       stringAttr.getValue().size()});
  }
  emitError(loc, "unsupported constant value");
  return nullptr;
}

/// Convert MLIR integer comparison predicate to LLVM IR comparison predicate.
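/// The mapping is one-to-one; for example, the dialect predicate `slt` becomes
/// `llvm::CmpInst::ICMP_SLT`.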
static llvm::CmpInst::Predicate getLLVMCmpPredicate(ICmpPredicate p) {
  switch (p) {
  case LLVM::ICmpPredicate::eq:
    return llvm::CmpInst::Predicate::ICMP_EQ;
  case LLVM::ICmpPredicate::ne:
    return llvm::CmpInst::Predicate::ICMP_NE;
  case LLVM::ICmpPredicate::slt:
    return llvm::CmpInst::Predicate::ICMP_SLT;
  case LLVM::ICmpPredicate::sle:
    return llvm::CmpInst::Predicate::ICMP_SLE;
  case LLVM::ICmpPredicate::sgt:
    return llvm::CmpInst::Predicate::ICMP_SGT;
  case LLVM::ICmpPredicate::sge:
    return llvm::CmpInst::Predicate::ICMP_SGE;
  case LLVM::ICmpPredicate::ult:
    return llvm::CmpInst::Predicate::ICMP_ULT;
  case LLVM::ICmpPredicate::ule:
    return llvm::CmpInst::Predicate::ICMP_ULE;
  case LLVM::ICmpPredicate::ugt:
    return llvm::CmpInst::Predicate::ICMP_UGT;
  case LLVM::ICmpPredicate::uge:
    return llvm::CmpInst::Predicate::ICMP_UGE;
  }
  llvm_unreachable("incorrect comparison predicate");
}

static llvm::CmpInst::Predicate getLLVMCmpPredicate(FCmpPredicate p) {
  switch (p) {
  case LLVM::FCmpPredicate::_false:
    return llvm::CmpInst::Predicate::FCMP_FALSE;
  case LLVM::FCmpPredicate::oeq:
    return llvm::CmpInst::Predicate::FCMP_OEQ;
  case LLVM::FCmpPredicate::ogt:
    return llvm::CmpInst::Predicate::FCMP_OGT;
  case LLVM::FCmpPredicate::oge:
    return llvm::CmpInst::Predicate::FCMP_OGE;
  case LLVM::FCmpPredicate::olt:
    return llvm::CmpInst::Predicate::FCMP_OLT;
  case LLVM::FCmpPredicate::ole:
    return llvm::CmpInst::Predicate::FCMP_OLE;
  case LLVM::FCmpPredicate::one:
    return llvm::CmpInst::Predicate::FCMP_ONE;
  case LLVM::FCmpPredicate::ord:
    return llvm::CmpInst::Predicate::FCMP_ORD;
  case LLVM::FCmpPredicate::ueq:
    return llvm::CmpInst::Predicate::FCMP_UEQ;
  case LLVM::FCmpPredicate::ugt:
    return llvm::CmpInst::Predicate::FCMP_UGT;
  case LLVM::FCmpPredicate::uge:
    return llvm::CmpInst::Predicate::FCMP_UGE;
  case LLVM::FCmpPredicate::ult:
    return llvm::CmpInst::Predicate::FCMP_ULT;
  case LLVM::FCmpPredicate::ule:
    return llvm::CmpInst::Predicate::FCMP_ULE;
  case LLVM::FCmpPredicate::une:
    return llvm::CmpInst::Predicate::FCMP_UNE;
  case LLVM::FCmpPredicate::uno:
    return llvm::CmpInst::Predicate::FCMP_UNO;
  case LLVM::FCmpPredicate::_true:
    return llvm::CmpInst::Predicate::FCMP_TRUE;
  }
  llvm_unreachable("incorrect comparison predicate");
}

static llvm::AtomicRMWInst::BinOp getLLVMAtomicBinOp(AtomicBinOp op) {
  switch (op) {
  case LLVM::AtomicBinOp::xchg:
    return llvm::AtomicRMWInst::BinOp::Xchg;
  case LLVM::AtomicBinOp::add:
    return llvm::AtomicRMWInst::BinOp::Add;
  case LLVM::AtomicBinOp::sub:
    return llvm::AtomicRMWInst::BinOp::Sub;
  case LLVM::AtomicBinOp::_and:
    return llvm::AtomicRMWInst::BinOp::And;
  case LLVM::AtomicBinOp::nand:
    return llvm::AtomicRMWInst::BinOp::Nand;
  case LLVM::AtomicBinOp::_or:
    return llvm::AtomicRMWInst::BinOp::Or;
  case LLVM::AtomicBinOp::_xor:
    return llvm::AtomicRMWInst::BinOp::Xor;
  case LLVM::AtomicBinOp::max:
    return llvm::AtomicRMWInst::BinOp::Max;
  case LLVM::AtomicBinOp::min:
    return llvm::AtomicRMWInst::BinOp::Min;
  case LLVM::AtomicBinOp::umax:
    return llvm::AtomicRMWInst::BinOp::UMax;
  case LLVM::AtomicBinOp::umin:
    return llvm::AtomicRMWInst::BinOp::UMin;
  case LLVM::AtomicBinOp::fadd:
    return llvm::AtomicRMWInst::BinOp::FAdd;
  case LLVM::AtomicBinOp::fsub:
    return
        llvm::AtomicRMWInst::BinOp::FSub;
  }
  llvm_unreachable("incorrect atomic binary operator");
}

static llvm::AtomicOrdering getLLVMAtomicOrdering(AtomicOrdering ordering) {
  switch (ordering) {
  case LLVM::AtomicOrdering::not_atomic:
    return llvm::AtomicOrdering::NotAtomic;
  case LLVM::AtomicOrdering::unordered:
    return llvm::AtomicOrdering::Unordered;
  case LLVM::AtomicOrdering::monotonic:
    return llvm::AtomicOrdering::Monotonic;
  case LLVM::AtomicOrdering::acquire:
    return llvm::AtomicOrdering::Acquire;
  case LLVM::AtomicOrdering::release:
    return llvm::AtomicOrdering::Release;
  case LLVM::AtomicOrdering::acq_rel:
    return llvm::AtomicOrdering::AcquireRelease;
  case LLVM::AtomicOrdering::seq_cst:
    return llvm::AtomicOrdering::SequentiallyConsistent;
  }
  llvm_unreachable("incorrect atomic ordering");
}

ModuleTranslation::ModuleTranslation(Operation *module,
                                     std::unique_ptr<llvm::Module> llvmModule)
    : mlirModule(module), llvmModule(std::move(llvmModule)),
      debugTranslation(
          std::make_unique<DebugTranslation>(module, *this->llvmModule)),
      ompDialect(module->getContext()->getLoadedDialect("omp")),
      typeTranslator(this->llvmModule->getContext()) {
  assert(satisfiesLLVMModule(mlirModule) &&
         "mlirModule should honor LLVM's module semantics.");
}

ModuleTranslation::~ModuleTranslation() {
  if (ompBuilder)
    ompBuilder->finalize();
}

/// Get the SSA value passed to the current block from the terminator operation
/// of its predecessor.
static Value getPHISourceValue(Block *current, Block *pred,
                               unsigned numArguments, unsigned index) {
  Operation &terminator = *pred->getTerminator();
  if (isa<LLVM::BrOp>(terminator))
    return terminator.getOperand(index);

  SuccessorRange successors = terminator.getSuccessors();
  assert(std::adjacent_find(successors.begin(), successors.end()) ==
             successors.end() &&
         "successors with arguments in LLVM branches must be different blocks");
  (void)successors;

  // For instructions that branch based on a condition value, we need to take
  // the operands for the branch that was taken.
  if (auto condBranchOp = dyn_cast<LLVM::CondBrOp>(terminator)) {
    // For conditional branches, we take the operands from either the "true" or
    // the "false" branch.
    return condBranchOp.getSuccessor(0) == current
               ? condBranchOp.trueDestOperands()[index]
               : condBranchOp.falseDestOperands()[index];
  } else if (auto switchOp = dyn_cast<LLVM::SwitchOp>(terminator)) {
    // For switches, we take the operands from either the default case, or from
    // the case branch that was taken.
    if (switchOp.defaultDestination() == current)
      return switchOp.defaultOperands()[index];
    for (auto i : llvm::enumerate(switchOp.caseDestinations()))
      if (i.value() == current)
        return switchOp.getCaseOperands(i.index())[index];
  }

  llvm_unreachable("only branch or switch operations can be terminators of a "
                   "block that has successors");
}

/// Connect the PHI nodes to the results of preceding blocks.
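/// The value flowing into each PHI node is the successor operand of the
/// predecessor's terminator, looked up via `getPHISourceValue` above.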
template <typename T>
static void connectPHINodes(
    T &func, const DenseMap<Value, llvm::Value *> &valueMapping,
    const DenseMap<Block *, llvm::BasicBlock *> &blockMapping,
    const DenseMap<Operation *, llvm::Instruction *> &branchMapping) {
  // Skip the first block: it cannot be branched to, and its arguments
  // correspond to the arguments of the LLVM function.
  for (auto it = std::next(func.begin()), eit = func.end(); it != eit; ++it) {
    Block *bb = &*it;
    llvm::BasicBlock *llvmBB = blockMapping.lookup(bb);
    auto phis = llvmBB->phis();
    auto numArguments = bb->getNumArguments();
    assert(numArguments == std::distance(phis.begin(), phis.end()));
    for (auto &numberedPhiNode : llvm::enumerate(phis)) {
      auto &phiNode = numberedPhiNode.value();
      unsigned index = numberedPhiNode.index();
      for (auto *pred : bb->getPredecessors()) {
        // Find the LLVM IR block that contains the converted terminator
        // instruction and use it in the PHI node. Note that this block is not
        // necessarily the same as blockMapping.lookup(pred): some operations
        // (in particular, OpenMP operations using OpenMPIRBuilder) may have
        // split the blocks.
        llvm::Instruction *terminator =
            branchMapping.lookup(pred->getTerminator());
        assert(terminator && "missing the mapping for a terminator");
        phiNode.addIncoming(valueMapping.lookup(getPHISourceValue(
                                bb, pred, numArguments, index)),
                            terminator->getParent());
      }
    }
  }
}

/// Sort function blocks topologically.
template <typename T>
static llvm::SetVector<Block *> topologicalSort(T &f) {
  // For each block that has not been visited yet (i.e. that has no
  // predecessors), add it to the list as well as its successors.
  llvm::SetVector<Block *> blocks;
  for (Block &b : f) {
    if (blocks.count(&b) == 0) {
      llvm::ReversePostOrderTraversal<Block *> traversal(&b);
      blocks.insert(traversal.begin(), traversal.end());
    }
  }
  assert(blocks.size() == f.getBlocks().size() && "some blocks are not sorted");

  return blocks;
}

/// Convert the OpenMP parallel operation to LLVM IR.
LogicalResult
ModuleTranslation::convertOmpParallel(Operation &opInst,
                                      llvm::IRBuilder<> &builder) {
  using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
  // TODO: support error propagation in OpenMPIRBuilder and use it instead of
  // relying on captured variables.
  LogicalResult bodyGenStatus = success();

  auto bodyGenCB = [&](InsertPointTy allocaIP, InsertPointTy codeGenIP,
                       llvm::BasicBlock &continuationIP) {
    llvm::LLVMContext &llvmContext = llvmModule->getContext();

    llvm::BasicBlock *codeGenIPBB = codeGenIP.getBlock();
    llvm::Instruction *codeGenIPBBTI = codeGenIPBB->getTerminator();
    ompContinuationIPStack.push_back(&continuationIP);

    // ParallelOp has only one region associated with it.
    auto &region = cast<omp::ParallelOp>(opInst).getRegion();
    for (auto &bb : region) {
      auto *llvmBB = llvm::BasicBlock::Create(
          llvmContext, "omp.par.region", codeGenIP.getBlock()->getParent());
      blockMapping[&bb] = llvmBB;
    }

    convertOmpOpRegions(region, valueMapping, blockMapping, codeGenIPBBTI,
                        continuationIP, builder, bodyGenStatus);
    ompContinuationIPStack.pop_back();
  };

  // TODO: Perform appropriate actions according to the data-sharing
  // attribute (shared, private, firstprivate, ...) of variables.
  // Currently defaults to shared.
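  // The privatization callback below therefore reports the original variable
  // pointer as the replacement value, i.e. every variable stays shared.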
  auto privCB = [&](InsertPointTy allocaIP, InsertPointTy codeGenIP,
                    llvm::Value &, llvm::Value &vPtr,
                    llvm::Value *&replacementValue) -> InsertPointTy {
    replacementValue = &vPtr;

    return codeGenIP;
  };

  // TODO: Perform finalization actions for variables. This has to be
  // called for variables which have destructors/finalizers.
  auto finiCB = [&](InsertPointTy codeGenIP) {};

  llvm::Value *ifCond = nullptr;
  if (auto ifExprVar = cast<omp::ParallelOp>(opInst).if_expr_var())
    ifCond = valueMapping.lookup(ifExprVar);
  llvm::Value *numThreads = nullptr;
  if (auto numThreadsVar = cast<omp::ParallelOp>(opInst).num_threads_var())
    numThreads = valueMapping.lookup(numThreadsVar);
  llvm::omp::ProcBindKind pbKind = llvm::omp::OMP_PROC_BIND_default;
  if (auto bind = cast<omp::ParallelOp>(opInst).proc_bind_val())
    pbKind = llvm::omp::getProcBindKind(bind.getValue());
  // TODO: Is the Parallel construct cancellable?
  bool isCancellable = false;
  // TODO: Determine the actual alloca insertion point, e.g., the function
  // entry or the alloca insertion point as provided by the body callback
  // above.
  llvm::OpenMPIRBuilder::InsertPointTy allocaIP(builder.saveIP());
  if (failed(bodyGenStatus))
    return failure();
  builder.restoreIP(
      ompBuilder->createParallel(builder, allocaIP, bodyGenCB, privCB, finiCB,
                                 ifCond, numThreads, pbKind, isCancellable));
  return success();
}

void ModuleTranslation::convertOmpOpRegions(
    Region &region, DenseMap<Value, llvm::Value *> &valueMapping,
    DenseMap<Block *, llvm::BasicBlock *> &blockMapping,
    llvm::Instruction *codeGenIPBBTI, llvm::BasicBlock &continuationIP,
    llvm::IRBuilder<> &builder, LogicalResult &bodyGenStatus) {
  // Convert blocks one by one in topological order to ensure
  // defs are converted before uses.
  llvm::SetVector<Block *> blocks = topologicalSort(region);
  for (auto indexedBB : llvm::enumerate(blocks)) {
    Block *bb = indexedBB.value();
    llvm::BasicBlock *curLLVMBB = blockMapping[bb];
    if (bb->isEntryBlock()) {
      assert(codeGenIPBBTI->getNumSuccessors() == 1 &&
             "OpenMPIRBuilder provided entry block has multiple successors");
      assert(codeGenIPBBTI->getSuccessor(0) == &continuationIP &&
             "ContinuationIP is not the successor of OpenMPIRBuilder "
             "provided entry block");
      codeGenIPBBTI->setSuccessor(0, curLLVMBB);
    }

    if (failed(convertBlock(*bb, /*ignoreArguments=*/indexedBB.index() == 0))) {
      bodyGenStatus = failure();
      return;
    }
  }
  // Finally, after all blocks have been traversed and values mapped,
  // connect the PHI nodes to the results of preceding blocks.
  connectPHINodes(region, valueMapping, blockMapping, branchMapping);
}

LogicalResult ModuleTranslation::convertOmpMaster(Operation &opInst,
                                                  llvm::IRBuilder<> &builder) {
  using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
  // TODO: support error propagation in OpenMPIRBuilder and use it instead of
  // relying on captured variables.
  LogicalResult bodyGenStatus = success();

  auto bodyGenCB = [&](InsertPointTy allocaIP, InsertPointTy codeGenIP,
                       llvm::BasicBlock &continuationIP) {
    llvm::LLVMContext &llvmContext = llvmModule->getContext();

    llvm::BasicBlock *codeGenIPBB = codeGenIP.getBlock();
    llvm::Instruction *codeGenIPBBTI = codeGenIPBB->getTerminator();
    ompContinuationIPStack.push_back(&continuationIP);

    // MasterOp has only one region associated with it.
    auto &region = cast<omp::MasterOp>(opInst).getRegion();
    for (auto &bb : region) {
      auto *llvmBB = llvm::BasicBlock::Create(
          llvmContext, "omp.master.region", codeGenIP.getBlock()->getParent());
      blockMapping[&bb] = llvmBB;
    }
    convertOmpOpRegions(region, valueMapping, blockMapping, codeGenIPBBTI,
                        continuationIP, builder, bodyGenStatus);
    ompContinuationIPStack.pop_back();
  };

  // TODO: Perform finalization actions for variables. This has to be
  // called for variables which have destructors/finalizers.
  auto finiCB = [&](InsertPointTy codeGenIP) {};

  builder.restoreIP(ompBuilder->createMaster(builder, bodyGenCB, finiCB));
  return success();
}

/// Converts an OpenMP workshare loop into LLVM IR using OpenMPIRBuilder.
LogicalResult ModuleTranslation::convertOmpWsLoop(Operation &opInst,
                                                  llvm::IRBuilder<> &builder) {
  auto loop = cast<omp::WsLoopOp>(opInst);
  // TODO: this should be in the op verifier instead.
  if (loop.lowerBound().empty())
    return failure();

  if (loop.getNumLoops() != 1)
    return opInst.emitOpError("collapsed loops not yet supported");

  if (loop.schedule_val().hasValue() &&
      omp::symbolizeClauseScheduleKind(loop.schedule_val().getValue()) !=
          omp::ClauseScheduleKind::Static)
    return opInst.emitOpError(
        "only static (default) loop schedule is currently supported");

  llvm::Function *func = builder.GetInsertBlock()->getParent();
  llvm::LLVMContext &llvmContext = llvmModule->getContext();

  // Find the loop configuration.
  llvm::Value *lowerBound = valueMapping.lookup(loop.lowerBound()[0]);
  llvm::Value *upperBound = valueMapping.lookup(loop.upperBound()[0]);
  llvm::Value *step = valueMapping.lookup(loop.step()[0]);
  llvm::Type *ivType = step->getType();
  llvm::Value *chunk = loop.schedule_chunk_var()
                           ? valueMapping[loop.schedule_chunk_var()]
                           : llvm::ConstantInt::get(ivType, 1);

  // Set up the source location value for the OpenMP runtime.
  llvm::DISubprogram *subprogram =
      builder.GetInsertBlock()->getParent()->getSubprogram();
  const llvm::DILocation *diLoc =
      debugTranslation->translateLoc(opInst.getLoc(), subprogram);
  llvm::OpenMPIRBuilder::LocationDescription ompLoc(builder.saveIP(),
                                                    llvm::DebugLoc(diLoc));

  // Generator of the canonical loop body. Produces an SESE region of basic
  // blocks.
  // TODO: support error propagation in OpenMPIRBuilder and use it instead of
  // relying on captured variables.
  LogicalResult bodyGenStatus = success();
  auto bodyGen = [&](llvm::OpenMPIRBuilder::InsertPointTy ip, llvm::Value *iv) {
    llvm::IRBuilder<>::InsertPointGuard guard(builder);

    // Make sure further conversions know about the induction variable.
    valueMapping[loop.getRegion().front().getArgument(0)] = iv;

    llvm::BasicBlock *entryBlock = ip.getBlock();
    llvm::BasicBlock *exitBlock =
        entryBlock->splitBasicBlock(ip.getPoint(), "omp.wsloop.exit");

    // Convert the body of the loop.
    Region &region = loop.region();
    for (Block &bb : region) {
      llvm::BasicBlock *llvmBB =
          llvm::BasicBlock::Create(llvmContext, "omp.wsloop.region", func);
      blockMapping[&bb] = llvmBB;

      // Retarget the branch of the entry block to the entry block of the
      // converted region (regions are single-entry).
      if (bb.isEntryBlock()) {
        auto *branch = cast<llvm::BranchInst>(entryBlock->getTerminator());
        branch->setSuccessor(0, llvmBB);
      }
    }

    // Block conversion creates a new IRBuilder every time, so we need not
    // bother about maintaining the insertion point.
    llvm::SetVector<Block *> blocks = topologicalSort(region);
    for (Block *bb : blocks) {
      if (failed(convertBlock(*bb, bb->isEntryBlock()))) {
        bodyGenStatus = failure();
        return;
      }

      // Special handling for `omp.yield` terminators (we may have more than
      // one): they return control to the parent WsLoop operation, so replace
      // them with a branch to the exit block. We handle this here to avoid
      // relying on inter-function communication through the ModuleTranslation
      // class to set up the correct insertion point. This is also consistent
      // with MLIR's idiom of handling special region terminators in the same
      // code that handles the region-owning operation.
      if (isa<omp::YieldOp>(bb->getTerminator())) {
        llvm::BasicBlock *llvmBB = blockMapping[bb];
        builder.SetInsertPoint(llvmBB, llvmBB->end());
        builder.CreateBr(exitBlock);
      }
    }

    connectPHINodes(region, valueMapping, blockMapping, branchMapping);
  };

  // Delegate actual loop construction to the OpenMP IRBuilder.
  // TODO: this currently assumes WsLoop is semantically similar to an SCF
  // loop, i.e. it has a positive step, uses signed integer semantics, and its
  // upper bound is not included. Reconsider this code when WsLoop clearly
  // supports more cases.
  llvm::BasicBlock *insertBlock = builder.GetInsertBlock();
  llvm::CanonicalLoopInfo *loopInfo = ompBuilder->createCanonicalLoop(
      ompLoc, bodyGen, lowerBound, upperBound, step, /*IsSigned=*/true,
      /*InclusiveStop=*/false);
  if (failed(bodyGenStatus))
    return failure();

  // TODO: get the alloca insertion point from the parallel operation builder.
  // If we insert them at the top of the current function, they will be passed
  // as extra arguments into the function the parallel operation builder
  // outlines. Put them at the start of the current block for now.
  llvm::OpenMPIRBuilder::InsertPointTy allocaIP(
      insertBlock, insertBlock->getFirstInsertionPt());
  loopInfo = ompBuilder->createStaticWorkshareLoop(
      ompLoc, loopInfo, allocaIP,
      !loop.nowait().hasValue() || loop.nowait().getValue(), chunk);

  // Continue building IR after the loop.
  builder.restoreIP(loopInfo->getAfterIP());
  return success();
}

/// Given an OpenMP MLIR operation, create the corresponding LLVM IR
/// (including OpenMP runtime calls).
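/// Operations without regions map to a single OpenMPIRBuilder call; operations
/// carrying regions (parallel, master, wsloop) are dispatched to the dedicated
/// converters defined above.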
LogicalResult
ModuleTranslation::convertOmpOperation(Operation &opInst,
                                       llvm::IRBuilder<> &builder) {
  if (!ompBuilder) {
    ompBuilder = std::make_unique<llvm::OpenMPIRBuilder>(*llvmModule);
    ompBuilder->initialize();
  }
  return llvm::TypeSwitch<Operation *, LogicalResult>(&opInst)
      .Case([&](omp::BarrierOp) {
        ompBuilder->createBarrier(builder.saveIP(), llvm::omp::OMPD_barrier);
        return success();
      })
      .Case([&](omp::TaskwaitOp) {
        ompBuilder->createTaskwait(builder.saveIP());
        return success();
      })
      .Case([&](omp::TaskyieldOp) {
        ompBuilder->createTaskyield(builder.saveIP());
        return success();
      })
      .Case([&](omp::FlushOp) {
        // The OpenMP runtime function (__kmpc_flush) does not accept an
        // argument list.
        // The OpenMP standard states the following:
        // "An implementation may implement a flush with a list by ignoring
        // the list, and treating it the same as a flush without a list."
        //
        // The argument list is therefore discarded, so a flush with a list is
        // treated the same as a flush without a list.
        ompBuilder->createFlush(builder.saveIP());
        return success();
      })
      .Case([&](omp::TerminatorOp) {
        builder.CreateBr(ompContinuationIPStack.back());
        return success();
      })
      .Case(
          [&](omp::ParallelOp) { return convertOmpParallel(opInst, builder); })
      .Case([&](omp::MasterOp) { return convertOmpMaster(opInst, builder); })
      .Case([&](omp::WsLoopOp) { return convertOmpWsLoop(opInst, builder); })
      .Case([&](omp::YieldOp op) {
        // Yields are loop terminators that can simply be omitted. The loop
        // structure was created in the function that handles WsLoopOp.
        assert(op.getNumOperands() == 0 && "unexpected yield with operands");
        return success();
      })
      .Default([&](Operation *inst) {
        return inst->emitError("unsupported OpenMP operation: ")
               << inst->getName();
      });
}

/// Given a single MLIR operation, create the corresponding LLVM IR operation
/// using the `builder`. LLVM IR Builder does not have a generic interface, so
/// this has to be a long chain of `if`s calling different functions with a
/// different number of arguments.
LogicalResult ModuleTranslation::convertOperation(Operation &opInst,
                                                  llvm::IRBuilder<> &builder) {
  auto extractPosition = [](ArrayAttr attr) {
    SmallVector<unsigned, 4> position;
    position.reserve(attr.size());
    for (Attribute v : attr)
      position.push_back(v.cast<IntegerAttr>().getValue().getZExtValue());
    return position;
  };

#include "mlir/Dialect/LLVMIR/LLVMConversions.inc"

  // Emit function calls. If the "callee" attribute is present, this is a
  // direct function call and we also need to look up the remapped function
  // itself. Otherwise, this is an indirect call and the callee is the first
  // operand; look it up as a normal value. Return the llvm::Value representing
  // the function result, which may be of llvm::VoidTy type.
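  // For example, `llvm.call @foo(%0)` carries a "callee" symbol attribute and
  // becomes a direct call to the LLVM function `foo`, whereas
  // `llvm.call %fptr(%0)` has no such attribute and becomes an indirect call
  // through the function pointer mapped from `%fptr`.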
  auto convertCall = [this, &builder](Operation &op) -> llvm::Value * {
    auto operands = lookupValues(op.getOperands());
    ArrayRef<llvm::Value *> operandsRef(operands);
    if (auto attr = op.getAttrOfType<FlatSymbolRefAttr>("callee")) {
      return builder.CreateCall(functionMapping.lookup(attr.getValue()),
                                operandsRef);
    } else {
      auto *calleePtrType =
          cast<llvm::PointerType>(operandsRef.front()->getType());
      auto *calleeType =
          cast<llvm::FunctionType>(calleePtrType->getElementType());
      return builder.CreateCall(calleeType, operandsRef.front(),
                                operandsRef.drop_front());
    }
  };

  // Emit calls. If the called function has a result, remap the corresponding
  // value. Note that the LLVM IR dialect CallOp has either 0 or 1 result.
  if (isa<LLVM::CallOp>(opInst)) {
    llvm::Value *result = convertCall(opInst);
    if (opInst.getNumResults() != 0) {
      valueMapping[opInst.getResult(0)] = result;
      return success();
    }
    // Check that LLVM call returns void for 0-result functions.
    return success(result->getType()->isVoidTy());
  }

  if (auto inlineAsmOp = dyn_cast<LLVM::InlineAsmOp>(opInst)) {
    // TODO: refactor function type creation which usually occurs in std-LLVM
    // conversion.
    SmallVector<LLVM::LLVMType, 8> operandTypes;
    operandTypes.reserve(inlineAsmOp.operands().size());
    for (auto t : inlineAsmOp.operands().getTypes())
      operandTypes.push_back(t.cast<LLVM::LLVMType>());

    LLVM::LLVMType resultType;
    if (inlineAsmOp.getNumResults() == 0) {
      resultType = LLVM::LLVMVoidType::get(mlirModule->getContext());
    } else {
      assert(inlineAsmOp.getNumResults() == 1);
      resultType = inlineAsmOp.getResultTypes()[0].cast<LLVM::LLVMType>();
    }
    auto ft = LLVM::LLVMFunctionType::get(resultType, operandTypes);
    llvm::InlineAsm *inlineAsmInst =
        inlineAsmOp.asm_dialect().hasValue()
            ?
              llvm::InlineAsm::get(
                  static_cast<llvm::FunctionType *>(convertType(ft)),
                  inlineAsmOp.asm_string(), inlineAsmOp.constraints(),
                  inlineAsmOp.has_side_effects(), inlineAsmOp.is_align_stack(),
                  convertAsmDialectToLLVM(*inlineAsmOp.asm_dialect()))
            : llvm::InlineAsm::get(
                  static_cast<llvm::FunctionType *>(convertType(ft)),
                  inlineAsmOp.asm_string(), inlineAsmOp.constraints(),
                  inlineAsmOp.has_side_effects(), inlineAsmOp.is_align_stack());
    llvm::Value *result =
        builder.CreateCall(inlineAsmInst, lookupValues(inlineAsmOp.operands()));
    if (opInst.getNumResults() != 0)
      valueMapping[opInst.getResult(0)] = result;
    return success();
  }

  if (auto invOp = dyn_cast<LLVM::InvokeOp>(opInst)) {
    auto operands = lookupValues(opInst.getOperands());
    ArrayRef<llvm::Value *> operandsRef(operands);
    if (auto attr = opInst.getAttrOfType<FlatSymbolRefAttr>("callee")) {
      builder.CreateInvoke(functionMapping.lookup(attr.getValue()),
                           blockMapping[invOp.getSuccessor(0)],
                           blockMapping[invOp.getSuccessor(1)], operandsRef);
    } else {
      auto *calleePtrType =
          cast<llvm::PointerType>(operandsRef.front()->getType());
      auto *calleeType =
          cast<llvm::FunctionType>(calleePtrType->getElementType());
      builder.CreateInvoke(
          calleeType, operandsRef.front(), blockMapping[invOp.getSuccessor(0)],
          blockMapping[invOp.getSuccessor(1)], operandsRef.drop_front());
    }
    return success();
  }

  if (auto lpOp = dyn_cast<LLVM::LandingpadOp>(opInst)) {
    llvm::Type *ty = convertType(lpOp.getType().cast<LLVMType>());
    llvm::LandingPadInst *lpi =
        builder.CreateLandingPad(ty, lpOp.getNumOperands());

    // Add clauses.
    for (auto operand : lookupValues(lpOp.getOperands())) {
      // All operands should be constant - checked by verifier.
      if (auto constOperand = dyn_cast<llvm::Constant>(operand))
        lpi->addClause(constOperand);
    }
    valueMapping[lpOp.getResult()] = lpi;
    return success();
  }

  // Emit branches. We need to look up the remapped blocks and ignore the block
  // arguments that were transformed into PHI nodes.
  if (auto brOp = dyn_cast<LLVM::BrOp>(opInst)) {
    llvm::BranchInst *branch =
        builder.CreateBr(blockMapping[brOp.getSuccessor()]);
    branchMapping.try_emplace(&opInst, branch);
    return success();
  }
  if (auto condbrOp = dyn_cast<LLVM::CondBrOp>(opInst)) {
    auto weights = condbrOp.branch_weights();
    llvm::MDNode *branchWeights = nullptr;
    if (weights) {
      // Map weight attributes to LLVM metadata.
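      // For example, a `branch_weights` attribute of [100, 1] is emitted as
      // `!prof !{!"branch_weights", i32 100, i32 1}` on the conditional branch.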
      auto trueWeight =
          weights.getValue().getValue(0).cast<IntegerAttr>().getInt();
      auto falseWeight =
          weights.getValue().getValue(1).cast<IntegerAttr>().getInt();
      branchWeights =
          llvm::MDBuilder(llvmModule->getContext())
              .createBranchWeights(static_cast<uint32_t>(trueWeight),
                                   static_cast<uint32_t>(falseWeight));
    }
    llvm::BranchInst *branch = builder.CreateCondBr(
        valueMapping.lookup(condbrOp.getOperand(0)),
        blockMapping[condbrOp.getSuccessor(0)],
        blockMapping[condbrOp.getSuccessor(1)], branchWeights);
    branchMapping.try_emplace(&opInst, branch);
    return success();
  }
  if (auto switchOp = dyn_cast<LLVM::SwitchOp>(opInst)) {
    llvm::MDNode *branchWeights = nullptr;
    if (auto weights = switchOp.branch_weights()) {
      llvm::SmallVector<uint32_t> weightValues;
      weightValues.reserve(weights->size());
      for (llvm::APInt weight : weights->cast<DenseIntElementsAttr>())
        weightValues.push_back(weight.getLimitedValue());
      branchWeights = llvm::MDBuilder(llvmModule->getContext())
                          .createBranchWeights(weightValues);
    }

    llvm::SwitchInst *switchInst =
        builder.CreateSwitch(valueMapping[switchOp.value()],
                             blockMapping[switchOp.defaultDestination()],
                             switchOp.caseDestinations().size(), branchWeights);

    auto *ty = llvm::cast<llvm::IntegerType>(
        convertType(switchOp.value().getType().cast<LLVMType>()));
    for (auto i :
         llvm::zip(switchOp.case_values()->cast<DenseIntElementsAttr>(),
                   switchOp.caseDestinations()))
      switchInst->addCase(
          llvm::ConstantInt::get(ty, std::get<0>(i).getLimitedValue()),
          blockMapping[std::get<1>(i)]);

    branchMapping.try_emplace(&opInst, switchInst);
    return success();
  }

  // Emit addressof. We need to look up the global value referenced by the
  // operation and store it in the MLIR-to-LLVM value mapping. This does not
  // emit any LLVM instruction.
  if (auto addressOfOp = dyn_cast<LLVM::AddressOfOp>(opInst)) {
    LLVM::GlobalOp global = addressOfOp.getGlobal();
    LLVM::LLVMFuncOp function = addressOfOp.getFunction();

    // The verifier should not have allowed this.
    assert((global || function) &&
           "referencing an undefined global or function");

    valueMapping[addressOfOp.getResult()] =
        global ? globalsMapping.lookup(global)
               : functionMapping.lookup(function.getName());
    return success();
  }

  if (ompDialect && opInst.getDialect() == ompDialect)
    return convertOmpOperation(opInst, builder);

  return opInst.emitError("unsupported or non-LLVM operation: ")
         << opInst.getName();
}

/// Convert block to LLVM IR. Unless `ignoreArguments` is set, emit PHI nodes
/// to define values corresponding to the MLIR block arguments. These nodes
/// are not connected to the source basic blocks, which may not exist yet.
LogicalResult ModuleTranslation::convertBlock(Block &bb, bool ignoreArguments) {
  llvm::IRBuilder<> builder(blockMapping[&bb]);
  auto *subprogram = builder.GetInsertBlock()->getParent()->getSubprogram();

  // Before traversing operations, make block arguments available through
  // value remapping and PHI nodes, but do not add incoming edges for the PHI
  // nodes just yet: those values may be defined by this or following blocks.
  // This step is omitted if "ignoreArguments" is set. The arguments of the
  // first block have been already made available through the remapping of
  // LLVM function arguments.
  if (!ignoreArguments) {
    auto predecessors = bb.getPredecessors();
    unsigned numPredecessors =
        std::distance(predecessors.begin(), predecessors.end());
    for (auto arg : bb.getArguments()) {
      auto wrappedType = arg.getType().dyn_cast<LLVM::LLVMType>();
      if (!wrappedType)
        return emitError(bb.front().getLoc(),
                         "block argument does not have an LLVM type");
      llvm::Type *type = convertType(wrappedType);
      llvm::PHINode *phi = builder.CreatePHI(type, numPredecessors);
      valueMapping[arg] = phi;
    }
  }

  // Traverse operations.
  for (auto &op : bb) {
    // Set the current debug location within the builder.
    builder.SetCurrentDebugLocation(
        debugTranslation->translateLoc(op.getLoc(), subprogram));

    if (failed(convertOperation(op, builder)))
      return failure();
  }

  return success();
}

/// Create named global variables that correspond to llvm.mlir.global
/// definitions.
LogicalResult ModuleTranslation::convertGlobals() {
  for (auto op : getModuleBody(mlirModule).getOps<LLVM::GlobalOp>()) {
    llvm::Type *type = convertType(op.getType());
    llvm::Constant *cst = llvm::UndefValue::get(type);
    if (op.getValueOrNull()) {
      // String attributes are treated separately because they cannot appear as
      // in-function constants and are thus not supported by getLLVMConstant.
      if (auto strAttr = op.getValueOrNull().dyn_cast_or_null<StringAttr>()) {
        cst = llvm::ConstantDataArray::getString(
            llvmModule->getContext(), strAttr.getValue(), /*AddNull=*/false);
        type = cst->getType();
      } else if (!(cst = getLLVMConstant(type, op.getValueOrNull(),
                                         op.getLoc()))) {
        return failure();
      }
    } else if (Block *initializer = op.getInitializerBlock()) {
      llvm::IRBuilder<> builder(llvmModule->getContext());
      for (auto &op : initializer->without_terminator()) {
        if (failed(convertOperation(op, builder)) ||
            !isa<llvm::Constant>(valueMapping.lookup(op.getResult(0))))
          return emitError(op.getLoc(), "unemittable constant value");
      }
      ReturnOp ret = cast<ReturnOp>(initializer->getTerminator());
      cst = cast<llvm::Constant>(valueMapping.lookup(ret.getOperand(0)));
    }

    auto linkage = convertLinkageToLLVM(op.linkage());
    bool anyExternalLinkage =
        ((linkage == llvm::GlobalVariable::ExternalLinkage &&
          isa<llvm::UndefValue>(cst)) ||
         linkage == llvm::GlobalVariable::ExternalWeakLinkage);
    auto addrSpace = op.addr_space();
    auto *var = new llvm::GlobalVariable(
        *llvmModule, type, op.constant(), linkage,
        anyExternalLinkage ? nullptr : cst, op.sym_name(),
        /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal, addrSpace);

    globalsMapping.try_emplace(op, var);
  }

  return success();
}

/// Attempts to add an attribute identified by `key`, optionally with the given
/// `value` to LLVM function `llvmFunc`. Reports errors at `loc` if any. If the
/// attribute has a kind known to LLVM IR, create the attribute of this kind,
/// otherwise keep it as a string attribute. Performs additional checks for
/// attributes known to have or not have a value in order to avoid assertions
/// inside LLVM upon construction.
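/// For example, the key `alwaysinline` is recognized as an enum attribute kind
/// and attached as such, while a key LLVM does not know (e.g. a hypothetical
/// "my-custom-flag") is attached verbatim as a string attribute.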
static LogicalResult checkedAddLLVMFnAttribute(Location loc,
                                               llvm::Function *llvmFunc,
                                               StringRef key,
                                               StringRef value = StringRef()) {
  auto kind = llvm::Attribute::getAttrKindFromName(key);
  if (kind == llvm::Attribute::None) {
    llvmFunc->addFnAttr(key, value);
    return success();
  }

  if (llvm::Attribute::doesAttrKindHaveArgument(kind)) {
    if (value.empty())
      return emitError(loc) << "LLVM attribute '" << key << "' expects a value";

    int result;
    if (!value.getAsInteger(/*Radix=*/0, result))
      llvmFunc->addFnAttr(
          llvm::Attribute::get(llvmFunc->getContext(), kind, result));
    else
      llvmFunc->addFnAttr(key, value);
    return success();
  }

  if (!value.empty())
    return emitError(loc) << "LLVM attribute '" << key
                          << "' does not expect a value, found '" << value
                          << "'";

  llvmFunc->addFnAttr(kind);
  return success();
}

/// Attaches the attributes listed in the given array attribute to `llvmFunc`.
/// Reports error to `loc` if any and returns immediately. Expects `attributes`
/// to be an array attribute containing either string attributes, treated as
/// value-less LLVM attributes, or array attributes containing two string
/// attributes, with the first string being the name of the corresponding LLVM
/// attribute and the second string being its value. Note that even integer
/// attributes are expected to have their values expressed as strings.
static LogicalResult
forwardPassthroughAttributes(Location loc, Optional<ArrayAttr> attributes,
                             llvm::Function *llvmFunc) {
  if (!attributes)
    return success();

  for (Attribute attr : *attributes) {
    if (auto stringAttr = attr.dyn_cast<StringAttr>()) {
      if (failed(
              checkedAddLLVMFnAttribute(loc, llvmFunc, stringAttr.getValue())))
        return failure();
      continue;
    }

    auto arrayAttr = attr.dyn_cast<ArrayAttr>();
    if (!arrayAttr || arrayAttr.size() != 2)
      return emitError(loc)
             << "expected 'passthrough' to contain string or array attributes";

    auto keyAttr = arrayAttr[0].dyn_cast<StringAttr>();
    auto valueAttr = arrayAttr[1].dyn_cast<StringAttr>();
    if (!keyAttr || !valueAttr)
      return emitError(loc)
             << "expected arrays within 'passthrough' to contain two strings";

    if (failed(checkedAddLLVMFnAttribute(loc, llvmFunc, keyAttr.getValue(),
                                         valueAttr.getValue())))
      return failure();
  }
  return success();
}

LogicalResult ModuleTranslation::convertOneFunction(LLVMFuncOp func) {
  // Clear the block, value, and branch mappings; they are only relevant within
  // one function.
  blockMapping.clear();
  valueMapping.clear();
  branchMapping.clear();
  llvm::Function *llvmFunc = functionMapping.lookup(func.getName());

  // Translate the debug information for this function.
  debugTranslation->translate(func, *llvmFunc);

  // Add function arguments to the value remapping table.
  // If there was noalias info then we decorate each argument accordingly.
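  // For example, an argument annotated with `llvm.noalias = true` receives the
  // LLVM `noalias` parameter attribute, and `llvm.align = 4` becomes `align 4`.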
  unsigned int argIdx = 0;
  for (auto kvp : llvm::zip(func.getArguments(), llvmFunc->args())) {
    llvm::Argument &llvmArg = std::get<1>(kvp);
    BlockArgument mlirArg = std::get<0>(kvp);

    if (auto attr = func.getArgAttrOfType<BoolAttr>(
            argIdx, LLVMDialect::getNoAliasAttrName())) {
      // NB: Attribute already verified to be boolean, so check if we can
      // indeed attach the attribute to this argument, based on its type.
      auto argTy = mlirArg.getType().dyn_cast<LLVM::LLVMType>();
      if (!argTy.isa<LLVM::LLVMPointerType>())
        return func.emitError(
            "llvm.noalias attribute attached to LLVM non-pointer argument");
      if (attr.getValue())
        llvmArg.addAttr(llvm::Attribute::AttrKind::NoAlias);
    }

    if (auto attr = func.getArgAttrOfType<IntegerAttr>(
            argIdx, LLVMDialect::getAlignAttrName())) {
      // NB: Attribute already verified to be int, so check if we can indeed
      // attach the attribute to this argument, based on its type.
      auto argTy = mlirArg.getType().dyn_cast<LLVM::LLVMType>();
      if (!argTy.isa<LLVM::LLVMPointerType>())
        return func.emitError(
            "llvm.align attribute attached to LLVM non-pointer argument");
      llvmArg.addAttrs(
          llvm::AttrBuilder().addAlignmentAttr(llvm::Align(attr.getInt())));
    }

    valueMapping[mlirArg] = &llvmArg;
    argIdx++;
  }

  // Check the personality and set it.
  if (func.personality().hasValue()) {
    llvm::Type *ty = llvm::Type::getInt8PtrTy(llvmFunc->getContext());
    if (llvm::Constant *pfunc =
            getLLVMConstant(ty, func.personalityAttr(), func.getLoc()))
      llvmFunc->setPersonalityFn(pfunc);
  }

  // First, create all blocks so we can jump to them.
  llvm::LLVMContext &llvmContext = llvmFunc->getContext();
  for (auto &bb : func) {
    auto *llvmBB = llvm::BasicBlock::Create(llvmContext);
    llvmBB->insertInto(llvmFunc);
    blockMapping[&bb] = llvmBB;
  }

  // Then, convert blocks one by one in topological order to ensure defs are
  // converted before uses.
  auto blocks = topologicalSort(func);
  for (auto indexedBB : llvm::enumerate(blocks)) {
    auto *bb = indexedBB.value();
    if (failed(convertBlock(*bb, /*ignoreArguments=*/indexedBB.index() == 0)))
      return failure();
  }

  // Finally, after all blocks have been traversed and values mapped, connect
  // the PHI nodes to the results of preceding blocks.
  connectPHINodes(func, valueMapping, blockMapping, branchMapping);
  return success();
}

LogicalResult ModuleTranslation::checkSupportedModuleOps(Operation *m) {
  for (Operation &o : getModuleBody(m).getOperations())
    if (!isa<LLVM::LLVMFuncOp, LLVM::GlobalOp>(&o) && !o.isKnownTerminator())
      return o.emitOpError("unsupported module-level operation");
  return success();
}

LogicalResult ModuleTranslation::convertFunctionSignatures() {
  // Declare all functions first because there may be function calls that form
  // a call graph with cycles, or global initializers that reference functions.
  for (auto function : getModuleBody(mlirModule).getOps<LLVMFuncOp>()) {
    llvm::FunctionCallee llvmFuncCst = llvmModule->getOrInsertFunction(
        function.getName(),
        cast<llvm::FunctionType>(convertType(function.getType())));
    llvm::Function *llvmFunc = cast<llvm::Function>(llvmFuncCst.getCallee());
    llvmFunc->setLinkage(convertLinkageToLLVM(function.linkage()));
    functionMapping[function.getName()] = llvmFunc;

    // Forward the pass-through attributes to LLVM.
    if (failed(forwardPassthroughAttributes(function.getLoc(),
                                            function.passthrough(), llvmFunc)))
      return failure();
  }

  return success();
}

LogicalResult ModuleTranslation::convertFunctions() {
  // Convert functions.
  for (auto function : getModuleBody(mlirModule).getOps<LLVMFuncOp>()) {
    // Ignore external functions.
    if (function.isExternal())
      continue;

    if (failed(convertOneFunction(function)))
      return failure();
  }

  return success();
}

llvm::Type *ModuleTranslation::convertType(LLVMType type) {
  return typeTranslator.translateType(type);
}

/// A helper to look up remapped operands in the value remapping table.
SmallVector<llvm::Value *, 8>
ModuleTranslation::lookupValues(ValueRange values) {
  SmallVector<llvm::Value *, 8> remapped;
  remapped.reserve(values.size());
  for (Value v : values) {
    assert(valueMapping.count(v) && "referencing undefined value");
    remapped.push_back(valueMapping.lookup(v));
  }
  return remapped;
}

std::unique_ptr<llvm::Module> ModuleTranslation::prepareLLVMModule(
    Operation *m, llvm::LLVMContext &llvmContext, StringRef name) {
  m->getContext()->getOrLoadDialect<LLVM::LLVMDialect>();
  auto llvmModule = std::make_unique<llvm::Module>(name, llvmContext);
  if (auto dataLayoutAttr =
          m->getAttr(LLVM::LLVMDialect::getDataLayoutAttrName()))
    llvmModule->setDataLayout(dataLayoutAttr.cast<StringAttr>().getValue());
  if (auto targetTripleAttr =
          m->getAttr(LLVM::LLVMDialect::getTargetTripleAttrName()))
    llvmModule->setTargetTriple(targetTripleAttr.cast<StringAttr>().getValue());

  // Inject declarations for `malloc` and `free` functions that can be used in
  // memref allocation/deallocation coming from standard ops lowering.
  llvm::IRBuilder<> builder(llvmContext);
  llvmModule->getOrInsertFunction("malloc", builder.getInt8PtrTy(),
                                  builder.getInt64Ty());
  llvmModule->getOrInsertFunction("free", builder.getVoidTy(),
                                  builder.getInt8PtrTy());

  return llvmModule;
}